// Targeted by JavaCPP version 1.4.4: DO NOT EDIT THIS FILE

package org.bytedeco.javacpp;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

public class tensorflow extends org.bytedeco.javacpp.helper.tensorflow {
    static { Loader.load(); }

@Name("tensorflow::gtl::InlinedVector") public static class AllocatorAttributesVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AllocatorAttributesVector(Pointer p) { super(p); }
    public AllocatorAttributesVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef AllocatorAttributesVector put(@ByRef AllocatorAttributesVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef AllocatorAttributes get(@Cast("size_t") long i);
    public native AllocatorAttributesVector put(@Cast("size_t") long i, AllocatorAttributes value);
}
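
// A minimal usage sketch for the InlinedVector wrappers above (variable names
// are illustrative): the mapped surface is indexed get/put plus size()/empty(),
// with no resize or push_back, so instances are normally obtained already
// populated from native calls rather than built up in Java.
//
//   AllocatorAttributesVector attrs = ...; // e.g. returned by a native method
//   for (long i = 0; i < attrs.size(); i++) {
//       AllocatorAttributes a = attrs.get(i); // @ByRef: refers to element i
//   }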

@Name("tensorflow::gtl::InlinedVector") public static class AllocRecordVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AllocRecordVector(Pointer p) { super(p); }
    public AllocRecordVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef AllocRecordVector put(@ByRef AllocRecordVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef AllocRecord get(@Cast("size_t") long i);
    public native AllocRecordVector put(@Cast("size_t") long i, AllocRecord value);
}

@Name("tensorflow::gtl::InlinedVector") public static class DeviceContextInlinedVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceContextInlinedVector(Pointer p) { super(p); }
    public DeviceContextInlinedVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef DeviceContextInlinedVector put(@ByRef DeviceContextInlinedVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native DeviceContext get(@Cast("size_t") long i);
    public native DeviceContextInlinedVector put(@Cast("size_t") long i, DeviceContext value);
}

@Name("tensorflow::gtl::InlinedVector") public static class DeviceTypeVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceTypeVector(Pointer p) { super(p); }
    public DeviceTypeVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef DeviceTypeVector put(@ByRef DeviceTypeVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef DeviceType get(@Cast("size_t") long i);
    public native DeviceTypeVector put(@Cast("size_t") long i, DeviceType value);
}

@Name("tensorflow::gtl::InlinedVector") public static class TensorValueVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorValueVector(Pointer p) { super(p); }
    public TensorValueVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef TensorValueVector put(@ByRef TensorValueVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef TensorValue get(@Cast("size_t") long i);
    public native TensorValueVector put(@Cast("size_t") long i, TensorValue value);
}

@Name("tensorflow::gtl::InlinedVector") public static class WrappedAllocatorVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public WrappedAllocatorVector(Pointer p) { super(p); }
    public WrappedAllocatorVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef WrappedAllocatorVector put(@ByRef WrappedAllocatorVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef WrappedAllocator get(@Cast("size_t") long i);
    public native WrappedAllocatorVector put(@Cast("size_t") long i, WrappedAllocator value);
}

@Name("tensorflow::gtl::InlinedVector") public static class LongVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LongVector(Pointer p) { super(p); }
    public LongVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef LongVector put(@ByRef LongVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @Cast("tensorflow::int64") long get(@Cast("size_t") long i);
    public native LongVector put(@Cast("size_t") long i, long value);
}

@Name("tensorflow::gtl::InlinedVector") public static class DataTypeVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DataTypeVector(Pointer p) { super(p); }
    public DataTypeVector()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef DataTypeVector put(@ByRef DataTypeVector x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @Cast("tensorflow::DataType") int get(@Cast("size_t") long i);
    public native DataTypeVector put(@Cast("size_t") long i, int value);
}
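
// A usage sketch for DataTypeVector: elements are tensorflow::DataType enum
// values mapped to plain ints, so they compare directly against the generated
// DT_* constants. The dtypes source below is hypothetical.
//
//   DataTypeVector dtypes = ...; // e.g. an op's declared input types
//   for (long i = 0; i < dtypes.size(); i++) {
//       boolean isFloat = dtypes.get(i) == DT_FLOAT;
//   }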

@Name("google::protobuf::Map") public static class StringStringMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringStringMap(Pointer p) { super(p); }
    public StringStringMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringStringMap put(@ByRef StringStringMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @StdString BytePointer get(@StdString BytePointer i);
    public native StringStringMap put(@StdString BytePointer i, BytePointer value);
    @ValueSetter @Index public native StringStringMap put(@StdString BytePointer i, @StdString String value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @StdString BytePointer second();
    }
}
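
// A usage sketch for the protobuf map wrappers (values are illustrative):
// lookup and insertion are keyed get/put, and traversal uses the mapped
// iterator, with equals(end()) as the loop guard since operator!= is not mapped.
//
//   StringStringMap m = new StringStringMap();
//   m.put(new BytePointer("device"), "cpu");
//   for (StringStringMap.Iterator it = m.begin(); !it.equals(m.end()); it = it.increment()) {
//       System.out.println(it.first().getString() + " -> " + it.second().getString());
//   }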

@Name("google::protobuf::Map") public static class StringIntMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringIntMap(Pointer p) { super(p); }
    public StringIntMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringIntMap put(@ByRef StringIntMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @Cast("google::protobuf::int32") int get(@StdString BytePointer i);
    public native StringIntMap put(@StdString BytePointer i, int value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @Cast("google::protobuf::int32") int second();
    }
}

@Name("google::protobuf::Map") public static class IntStringMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IntStringMap(Pointer p) { super(p); }
    public IntStringMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef IntStringMap put(@ByRef IntStringMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @StdString BytePointer get(int i);
    public native IntStringMap put(int i, BytePointer value);
    @ValueSetter @Index public native IntStringMap put(int i, @StdString String value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter int first();
        public native @Name("operator*().second") @MemberGetter @StdString BytePointer second();
    }
}

@Name("google::protobuf::Map") public static class StringFeatureMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringFeatureMap(Pointer p) { super(p); }
    public StringFeatureMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringFeatureMap put(@ByRef StringFeatureMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef Feature get(@StdString BytePointer i);
    public native StringFeatureMap put(@StdString BytePointer i, Feature value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const Feature second();
    }
}

@Name("google::protobuf::Map") public static class StringFeatureListMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringFeatureListMap(Pointer p) { super(p); }
    public StringFeatureListMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringFeatureListMap put(@ByRef StringFeatureListMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef FeatureList get(@StdString BytePointer i);
    public native StringFeatureListMap put(@StdString BytePointer i, FeatureList value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const FeatureList second();
    }
}

@Name("google::protobuf::Map") public static class StringCollectionDefMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringCollectionDefMap(Pointer p) { super(p); }
    public StringCollectionDefMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringCollectionDefMap put(@ByRef StringCollectionDefMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef CollectionDef get(@StdString BytePointer i);
    public native StringCollectionDefMap put(@StdString BytePointer i, CollectionDef value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const CollectionDef second();
    }
}

@Name("google::protobuf::Map") public static class StringSignatureDefMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringSignatureDefMap(Pointer p) { super(p); }
    public StringSignatureDefMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringSignatureDefMap put(@ByRef StringSignatureDefMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef SignatureDef get(@StdString BytePointer i);
    public native StringSignatureDefMap put(@StdString BytePointer i, SignatureDef value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const SignatureDef second();
    }
}

@Name("google::protobuf::Map") public static class StringTensorInfoMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringTensorInfoMap(Pointer p) { super(p); }
    public StringTensorInfoMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringTensorInfoMap put(@ByRef StringTensorInfoMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef TensorInfo get(@StdString BytePointer i);
    public native StringTensorInfoMap put(@StdString BytePointer i, TensorInfo value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const TensorInfo second();
    }
}

@Name("google::protobuf::Map") public static class StringAttrValueMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringAttrValueMap(Pointer p) { super(p); }
    public StringAttrValueMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringAttrValueMap put(@ByRef StringAttrValueMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef AttrValue get(@StdString BytePointer i);
    public native StringAttrValueMap put(@StdString BytePointer i, AttrValue value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const AttrValue second();
    }
}

@Name("tensorflow::gtl::FlatMap,tensorflow::hash >") public static class NameRangeMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NameRangeMap(Pointer p) { super(p); }
    public NameRangeMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef NameRangeMap put(@ByRef NameRangeMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index(function = "at") public native int first(@StringPiece BytePointer i); public native NameRangeMap first(@StringPiece BytePointer i, int first);
    @Index(function = "at") public native int second(@StringPiece BytePointer i);  public native NameRangeMap second(@StringPiece BytePointer i, int second);
}
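
// A usage sketch for NameRangeMap: each entry's std::pair<int,int> value is
// exposed through paired first()/second() accessors indexed by key. Both use
// at() semantics, so the key must be present; "output" here is illustrative.
//
//   NameRangeMap ranges = ...; // e.g. filled in by a name-range lookup
//   BytePointer key = new BytePointer("output");
//   int start = ranges.first(key);
//   int limit = ranges.second(key);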

@Name("tensorflow::gtl::FlatMap") public static class TF_SessionStringMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_SessionStringMap(Pointer p) { super(p); }
    public TF_SessionStringMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef TF_SessionStringMap put(@ByRef TF_SessionStringMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @StdString BytePointer get(TF_Session i);
    public native TF_SessionStringMap put(TF_Session i, BytePointer value);
    @ValueSetter @Index public native TF_SessionStringMap put(TF_Session i, @StdString String value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @Const TF_Session first();
        public native @Name("operator*().second") @MemberGetter @StdString BytePointer second();
    }
}

@Name("std::list") public static class StringList extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringList(Pointer p) { super(p); }
    public StringList()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringList put(@ByRef StringList x);

    public boolean empty() { return size() == 0; }
    public native long size();

    public native @ByVal Iterator insert(@ByVal Iterator pos, @StdString BytePointer value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @StdString BytePointer get();
    }
}
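
// A usage sketch for StringList: std::list has no indexed access, so all
// insertion and traversal goes through iterators (values are illustrative).
//
//   StringList list = new StringList();
//   list.insert(list.end(), new BytePointer("first"));
//   list.insert(list.end(), new BytePointer("second"));
//   for (StringList.Iterator it = list.begin(); !it.equals(list.end()); it = it.increment()) {
//       System.out.println(it.get().getString());
//   }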

@Name("std::map") public static class TensorIdTensorIdMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorIdTensorIdMap(Pointer p) { super(p); }
    public TensorIdTensorIdMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef TensorIdTensorIdMap put(@ByRef TensorIdTensorIdMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef TensorId get(@ByRef TensorId i);
    public native TensorIdTensorIdMap put(@ByRef TensorId i, TensorId value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @ByRef @Const TensorId first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const TensorId second();
    }
}

@Name("std::map") public static class SafeTensorIdTensorIdMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SafeTensorIdTensorIdMap(Pointer p) { super(p); }
    public SafeTensorIdTensorIdMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef SafeTensorIdTensorIdMap put(@ByRef SafeTensorIdTensorIdMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef SafeTensorId get(@ByRef SafeTensorId i);
    public native SafeTensorIdTensorIdMap put(@ByRef SafeTensorId i, SafeTensorId value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @ByRef @Const SafeTensorId first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const SafeTensorId second();
    }
}

@Name("std::set") public static class StringSet extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringSet(Pointer p) { super(p); }
    public StringSet()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringSet put(@ByRef StringSet x);

    public boolean empty() { return size() == 0; }
    public native long size();

    public native void insert(@StdString BytePointer value);
    public native void erase(@StdString BytePointer value);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @StdString BytePointer get();
    }
}
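
// A usage sketch for StringSet: membership is managed with insert/erase by
// value; no find() is mapped, so membership tests require iterating.
//
//   StringSet set = new StringSet();
//   set.insert(new BytePointer("gpu"));   // duplicate inserts are no-ops
//   set.erase(new BytePointer("cpu"));    // erasing an absent value is a no-op
//   System.out.println(set.size());       // prints 1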

@Name("std::vector") public static class StringPieceVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringPieceVector(Pointer p) { super(p); }
    public StringPieceVector(BytePointer value) { this(1); put(0, value); }
    public StringPieceVector(BytePointer ... array) { this(array.length); put(array); }
    public StringPieceVector(String value) { this(1); put(0, value); }
    public StringPieceVector(String ... array) { this(array.length); put(array); }
    public StringPieceVector()       { allocate();  }
    public StringPieceVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef StringPieceVector put(@ByRef StringPieceVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @StringPiece BytePointer get(@Cast("size_t") long i);
    public native StringPieceVector put(@Cast("size_t") long i, BytePointer value);
    @ValueSetter @Index(function = "at") public native StringPieceVector put(@Cast("size_t") long i, @StringPiece String value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @StringPiece BytePointer value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @StringPiece BytePointer get();
    }

    public BytePointer[] get() {
        BytePointer[] array = new BytePointer[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public BytePointer pop_back() {
        long size = size();
        BytePointer value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public StringPieceVector push_back(BytePointer value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public StringPieceVector put(BytePointer value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public StringPieceVector put(BytePointer ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }

    public StringPieceVector push_back(String value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public StringPieceVector put(String value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public StringPieceVector put(String ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class StringVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringVector(Pointer p) { super(p); }
    public StringVector(BytePointer value) { this(1); put(0, value); }
    public StringVector(BytePointer ... array) { this(array.length); put(array); }
    public StringVector(String value) { this(1); put(0, value); }
    public StringVector(String ... array) { this(array.length); put(array); }
    public StringVector()       { allocate();  }
    public StringVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef StringVector put(@ByRef StringVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @StdString BytePointer get(@Cast("size_t") long i);
    public native StringVector put(@Cast("size_t") long i, BytePointer value);
    @ValueSetter @Index(function = "at") public native StringVector put(@Cast("size_t") long i, @StdString String value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @StdString BytePointer value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @StdString BytePointer get();
    }

    public BytePointer[] get() {
        BytePointer[] array = new BytePointer[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public BytePointer pop_back() {
        long size = size();
        BytePointer value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public StringVector push_back(BytePointer value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public StringVector put(BytePointer value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public StringVector put(BytePointer ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }

    public StringVector push_back(String value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public StringVector put(String value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public StringVector put(String ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}
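
// A usage sketch for StringVector (values are illustrative): on top of indexed
// get/put, the generated helpers add varargs constructors, push_back/pop_back,
// a bulk get() returning BytePointer[], and a toString() over all elements.
//
//   StringVector v = new StringVector("a", "b");
//   v.push_back("c");
//   BytePointer last = v.pop_back();   // returns "c" and shrinks the vector
//   System.out.println(v);             // [a, b]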

@Name("std::vector >") public static class StringStringPairVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringStringPairVector(Pointer p) { super(p); }
    public StringStringPairVector(BytePointer[] firstValue, BytePointer[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public StringStringPairVector(String[] firstValue, String[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public StringStringPairVector()       { allocate();  }
    public StringStringPairVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef StringStringPairVector put(@ByRef StringStringPairVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringStringPairVector first(@Cast("size_t") long i, BytePointer first);
    @Index(function = "at") public native @StdString BytePointer second(@Cast("size_t") long i);  public native StringStringPairVector second(@Cast("size_t") long i, BytePointer second);
    @MemberSetter @Index(function = "at") public native StringStringPairVector first(@Cast("size_t") long i, @StdString String first);
    @MemberSetter @Index(function = "at") public native StringStringPairVector second(@Cast("size_t") long i, @StdString String second);

    public StringStringPairVector put(BytePointer[] firstValue, BytePointer[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }

    public StringStringPairVector put(String[] firstValue, String[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }
}
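
// A usage sketch for the pair-vector wrappers: std::vector<std::pair<...>> is
// exposed as parallel first(i)/second(i) accessors, and put(String[], String[])
// zips two arrays, stopping at the shorter one. The values are illustrative.
//
//   StringStringPairVector pairs = new StringStringPairVector(
//           new String[] {"input", "output"},
//           new String[] {"x:0", "y:0"});
//   String firstKey = pairs.first(0).getString(); // "input"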

@Name("std::vector") public static class DeviceVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceVector(Pointer p) { super(p); }
    public DeviceVector(Device value) { this(1); put(0, value); }
    public DeviceVector(Device ... array) { this(array.length); put(array); }
    public DeviceVector()       { allocate();  }
    public DeviceVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef DeviceVector put(@ByRef DeviceVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native Device get(@Cast("size_t") long i);
    public native DeviceVector put(@Cast("size_t") long i, Device value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, Device value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @Const Device get();
    }

    public Device[] get() {
        Device[] array = new Device[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Device pop_back() {
        long size = size();
        Device value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public DeviceVector push_back(Device value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public DeviceVector put(Device value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public DeviceVector put(Device ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class DeviceContextVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceContextVector(Pointer p) { super(p); }
    public DeviceContextVector(DeviceContext value) { this(1); put(0, value); }
    public DeviceContextVector(DeviceContext ... array) { this(array.length); put(array); }
    public DeviceContextVector()       { allocate();  }
    public DeviceContextVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef DeviceContextVector put(@ByRef DeviceContextVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native DeviceContext get(@Cast("size_t") long i);
    public native DeviceContextVector put(@Cast("size_t") long i, DeviceContext value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, DeviceContext value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @Const DeviceContext get();
    }

    public DeviceContext[] get() {
        DeviceContext[] array = new DeviceContext[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public DeviceContext pop_back() {
        long size = size();
        DeviceContext value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public DeviceContextVector push_back(DeviceContext value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public DeviceContextVector put(DeviceContext value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public DeviceContextVector put(DeviceContext ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class TensorVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorVector(Pointer p) { super(p); }
    public TensorVector(Tensor value) { this(1); put(0, value); }
    public TensorVector(Tensor ... array) { this(array.length); put(array); }
    public TensorVector()       { allocate();  }
    public TensorVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef TensorVector put(@ByRef TensorVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @ByRef Tensor get(@Cast("size_t") long i);
    public native TensorVector put(@Cast("size_t") long i, Tensor value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef Tensor value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @ByRef @Const Tensor get();
    }

    public Tensor[] get() {
        Tensor[] array = new Tensor[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Tensor pop_back() {
        long size = size();
        Tensor value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public TensorVector push_back(Tensor value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public TensorVector put(Tensor value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public TensorVector put(Tensor ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class TensorProtoVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorProtoVector(Pointer p) { super(p); }
    public TensorProtoVector(TensorProto value) { this(1); put(0, value); }
    public TensorProtoVector(TensorProto ... array) { this(array.length); put(array); }
    public TensorProtoVector()       { allocate();  }
    public TensorProtoVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef TensorProtoVector put(@ByRef TensorProtoVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @ByRef TensorProto get(@Cast("size_t") long i);
    public native TensorProtoVector put(@Cast("size_t") long i, TensorProto value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef TensorProto value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @ByRef @Const TensorProto get();
    }

    public TensorProto[] get() {
        TensorProto[] array = new TensorProto[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public TensorProto pop_back() {
        long size = size();
        TensorProto value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public TensorProtoVector push_back(TensorProto value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public TensorProtoVector put(TensorProto value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public TensorProtoVector put(TensorProto ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class TensorShapeVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorShapeVector(Pointer p) { super(p); }
    public TensorShapeVector(TensorShape value) { this(1); put(0, value); }
    public TensorShapeVector(TensorShape ... array) { this(array.length); put(array); }
    public TensorShapeVector()       { allocate();  }
    public TensorShapeVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef TensorShapeVector put(@ByRef TensorShapeVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @ByRef TensorShape get(@Cast("size_t") long i);
    public native TensorShapeVector put(@Cast("size_t") long i, TensorShape value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef TensorShape value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @ByRef @Const TensorShape get();
    }

    public TensorShape[] get() {
        TensorShape[] array = new TensorShape[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public TensorShape pop_back() {
        long size = size();
        TensorShape value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public TensorShapeVector push_back(TensorShape value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public TensorShapeVector put(TensorShape value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public TensorShapeVector put(TensorShape ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class NodeOutVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeOutVector(Pointer p) { super(p); }
    public NodeOutVector(NodeBuilder.NodeOut value) { this(1); put(0, value); }
    public NodeOutVector(NodeBuilder.NodeOut ... array) { this(array.length); put(array); }
    public NodeOutVector()       { allocate();  }
    public NodeOutVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef NodeOutVector put(@ByRef NodeOutVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @ByRef NodeBuilder.NodeOut get(@Cast("size_t") long i);
    public native NodeOutVector put(@Cast("size_t") long i, NodeBuilder.NodeOut value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef NodeBuilder.NodeOut value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @ByRef @Const NodeBuilder.NodeOut get();
    }

    public NodeBuilder.NodeOut[] get() {
        NodeBuilder.NodeOut[] array = new NodeBuilder.NodeOut[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public NodeBuilder.NodeOut pop_back() {
        long size = size();
        NodeBuilder.NodeOut value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public NodeOutVector push_back(NodeBuilder.NodeOut value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public NodeOutVector put(NodeBuilder.NodeOut value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public NodeOutVector put(NodeBuilder.NodeOut ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class NodeVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeVector(Pointer p) { super(p); }
    public NodeVector(Node value) { this(1); put(0, value); }
    public NodeVector(Node ... array) { this(array.length); put(array); }
    public NodeVector()       { allocate();  }
    public NodeVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef NodeVector put(@ByRef NodeVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native Node get(@Cast("size_t") long i);
    public native NodeVector put(@Cast("size_t") long i, Node value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, Node value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @Const Node get();
    }

    public Node[] get() {
        Node[] array = new Node[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Node pop_back() {
        long size = size();
        Node value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public NodeVector push_back(Node value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public NodeVector put(Node value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public NodeVector put(Node ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector >") public static class NodeIntPairVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeIntPairVector(Pointer p) { super(p); }
    public NodeIntPairVector(Node[] firstValue, int[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public NodeIntPairVector()       { allocate();  }
    public NodeIntPairVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef NodeIntPairVector put(@ByRef NodeIntPairVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native Node first(@Cast("size_t") long i); public native NodeIntPairVector first(@Cast("size_t") long i, Node first);
    @Index(function = "at") public native int second(@Cast("size_t") long i);  public native NodeIntPairVector second(@Cast("size_t") long i, int second);

    public NodeIntPairVector put(Node[] firstValue, int[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }
}

@Name("std::vector >") public static class StringAttrPairVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringAttrPairVector(Pointer p) { super(p); }
    public StringAttrPairVector(BytePointer[] firstValue, FunctionDefHelper.AttrValueWrapper[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public StringAttrPairVector(String[] firstValue, FunctionDefHelper.AttrValueWrapper[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public StringAttrPairVector()       { allocate();  }
    public StringAttrPairVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef StringAttrPairVector put(@ByRef StringAttrPairVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringAttrPairVector first(@Cast("size_t") long i, BytePointer first);
    @Index(function = "at") public native @ByRef FunctionDefHelper.AttrValueWrapper second(@Cast("size_t") long i);  public native StringAttrPairVector second(@Cast("size_t") long i, FunctionDefHelper.AttrValueWrapper second);
    @MemberSetter @Index(function = "at") public native StringAttrPairVector first(@Cast("size_t") long i, @StdString String first);

    public StringAttrPairVector put(BytePointer[] firstValue, FunctionDefHelper.AttrValueWrapper[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }

    public StringAttrPairVector put(String[] firstValue, FunctionDefHelper.AttrValueWrapper[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class ConstTensorPtrVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConstTensorPtrVector(Pointer p) { super(p); }
    public ConstTensorPtrVector(Tensor value) { this(1); put(0, value); }
    public ConstTensorPtrVector(Tensor ... array) { this(array.length); put(array); }
    public ConstTensorPtrVector()       { allocate();  }
    public ConstTensorPtrVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef ConstTensorPtrVector put(@ByRef ConstTensorPtrVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @Const Tensor get(@Cast("size_t") long i);
    public native ConstTensorPtrVector put(@Cast("size_t") long i, Tensor value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @Const Tensor value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @Const Tensor get();
    }

    public Tensor[] get() {
        Tensor[] array = new Tensor[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Tensor pop_back() {
        long size = size();
        Tensor value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public ConstTensorPtrVector push_back(Tensor value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public ConstTensorPtrVector put(Tensor value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public ConstTensorPtrVector put(Tensor ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}
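
/** Usage sketch (editor's illustration, not generated code): exercises the
 *  std::vector-style helpers declared above. push_back() grows the vector by
 *  one and stores the pointer; get() indexes it like vector::at. */
public static Tensor exampleLastTensor(ConstTensorPtrVector v, Tensor t) {
    v.push_back(t);              // resize(size + 1) followed by put(size, t)
    return v.get(v.size() - 1);  // read back the element just appended
}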

@Name("std::vector") public static class ConstDimensionPtrVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConstDimensionPtrVector(Pointer p) { super(p); }
    public ConstDimensionPtrVector(Dimension value) { this(1); put(0, value); }
    public ConstDimensionPtrVector(Dimension ... array) { this(array.length); put(array); }
    public ConstDimensionPtrVector()       { allocate();  }
    public ConstDimensionPtrVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef ConstDimensionPtrVector put(@ByRef ConstDimensionPtrVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @Const Dimension get(@Cast("size_t") long i);
    public native ConstDimensionPtrVector put(@Cast("size_t") long i, Dimension value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @Const Dimension value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @Const Dimension get();
    }

    public Dimension[] get() {
        Dimension[] array = new Dimension[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Dimension pop_back() {
        long size = size();
        Dimension value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public ConstDimensionPtrVector push_back(Dimension value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public ConstDimensionPtrVector put(Dimension value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public ConstDimensionPtrVector put(Dimension ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector >") public static class StringTensorPairVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringTensorPairVector(Pointer p) { super(p); }
    public StringTensorPairVector(BytePointer[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public StringTensorPairVector(String[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
    public StringTensorPairVector()       { allocate();  }
    public StringTensorPairVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef StringTensorPairVector put(@ByRef StringTensorPairVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringTensorPairVector first(@Cast("size_t") long i, BytePointer first);
    @Index(function = "at") public native @ByRef Tensor second(@Cast("size_t") long i);  public native StringTensorPairVector second(@Cast("size_t") long i, Tensor second);
    @MemberSetter @Index(function = "at") public native StringTensorPairVector first(@Cast("size_t") long i, @StdString String first);

    public StringTensorPairVector put(BytePointer[] firstValue, Tensor[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }

    public StringTensorPairVector put(String[] firstValue, Tensor[] secondValue) {
        for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
            first(i, firstValue[i]);
            second(i, secondValue[i]);
        }
        return this;
    }
}
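
/** Usage sketch (editor's illustration, not generated code): name/tensor pairs
 *  in this shape are what Session.Run-style calls in these bindings take as
 *  feeds. The tensor name "input:0" is a hypothetical example. */
public static StringTensorPairVector exampleFeeds(Tensor t) {
    return new StringTensorPairVector(new String[] {"input:0"}, new Tensor[] {t});
}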

@Name("std::vector") public static class EdgeVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EdgeVector(Pointer p) { super(p); }
    public EdgeVector(Edge value) { this(1); put(0, value); }
    public EdgeVector(Edge ... array) { this(array.length); put(array); }
    public EdgeVector()       { allocate();  }
    public EdgeVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef EdgeVector put(@ByRef EdgeVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native Edge get(@Cast("size_t") long i);
    public native EdgeVector put(@Cast("size_t") long i, Edge value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, Edge value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @Const Edge get();
    }

    public Edge[] get() {
        Edge[] array = new Edge[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Edge pop_back() {
        long size = size();
        Edge value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public EdgeVector push_back(Edge value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public EdgeVector put(Edge value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public EdgeVector put(Edge ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class OpDefVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpDefVector(Pointer p) { super(p); }
    public OpDefVector(OpDef value) { this(1); put(0, value); }
    public OpDefVector(OpDef ... array) { this(array.length); put(array); }
    public OpDefVector()       { allocate();  }
    public OpDefVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef OpDefVector put(@ByRef OpDefVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @ByRef OpDef get(@Cast("size_t") long i);
    public native OpDefVector put(@Cast("size_t") long i, OpDef value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef OpDef value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @ByRef @Const OpDef get();
    }

    public OpDef[] get() {
        OpDef[] array = new OpDef[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public OpDef pop_back() {
        long size = size();
        OpDef value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public OpDefVector push_back(OpDef value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public OpDefVector put(OpDef value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public OpDefVector put(OpDef ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}

@Name("std::vector") public static class OutputVector extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OutputVector(Pointer p) { super(p); }
    public OutputVector(Output value) { this(1); put(0, value); }
    public OutputVector(Output ... array) { this(array.length); put(array); }
    public OutputVector()       { allocate();  }
    public OutputVector(long n) { allocate(n); }
    private native void allocate();
    private native void allocate(@Cast("size_t") long n);
    public native @Name("operator=") @ByRef OutputVector put(@ByRef OutputVector x);

    public boolean empty() { return size() == 0; }
    public native long size();
    public void clear() { resize(0); }
    public native void resize(@Cast("size_t") long n);

    @Index(function = "at") public native @ByRef Output get(@Cast("size_t") long i);
    public native OutputVector put(@Cast("size_t") long i, Output value);

    public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef Output value);
    public native @ByVal Iterator erase(@ByVal Iterator pos);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @ByRef @Const Output get();
    }

    public Output[] get() {
        Output[] array = new Output[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
        for (int i = 0; i < array.length; i++) {
            array[i] = get(i);
        }
        return array;
    }
    @Override public String toString() {
        return java.util.Arrays.toString(get());
    }

    public Output pop_back() {
        long size = size();
        Output value = get(size - 1);
        resize(size - 1);
        return value;
    }
    public OutputVector push_back(Output value) {
        long size = size();
        resize(size + 1);
        return put(size, value);
    }
    public OutputVector put(Output value) {
        if (size() != 1) { resize(1); }
        return put(0, value);
    }
    public OutputVector put(Output ... array) {
        if (size() != array.length) { resize(array.length); }
        for (int i = 0; i < array.length; i++) {
            put(i, array[i]);
        }
        return this;
    }
}
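
/** Usage sketch (editor's illustration, not generated code): OutputVector is
 *  the Java view of a vector of tensorflow::Output, so collecting op outputs
 *  is just the varargs constructor declared above. */
public static OutputVector exampleCollectOutputs(Output a, Output b) {
    return new OutputVector(a, b);
}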

@NoOffset @Name("std::pair") public static class LongLongPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LongLongPair(Pointer p) { super(p); }
    public LongLongPair(long firstValue, long secondValue) { this(); put(firstValue, secondValue); }
    public LongLongPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef LongLongPair put(@ByRef LongLongPair x);


    @MemberGetter public native long first(); public native LongLongPair first(long first);
    @MemberGetter public native @Cast("google::protobuf::uint64") long second();  public native LongLongPair second(long second);

    public LongLongPair put(long firstValue, long secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}
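
/** Usage sketch (editor's illustration, not generated code): LongLongPair is
 *  how pair-of-uint64 results surface in Java, e.g. the deprecated
 *  Arena.SpaceAllocatedAndUsed() declared further below in this file. */
public static long exampleSpaceAllocated(Arena arena) {
    LongLongPair p = arena.SpaceAllocatedAndUsed();  // <space allocated, space used>
    return p.first();
}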

@NoOffset @Name("std::pair") public static class WrappedAllocator extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public WrappedAllocator(Pointer p) { super(p); }
    public WrappedAllocator(Allocator firstValue, TrackingAllocator secondValue) { this(); put(firstValue, secondValue); }
    public WrappedAllocator()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef WrappedAllocator put(@ByRef WrappedAllocator x);


    @MemberGetter public native Allocator first(); public native WrappedAllocator first(Allocator first);
    @MemberGetter public native TrackingAllocator second();  public native WrappedAllocator second(TrackingAllocator second);

    public WrappedAllocator put(Allocator firstValue, TrackingAllocator secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class ShapeHandlePair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShapeHandlePair(Pointer p) { super(p); }
    public ShapeHandlePair(ShapeHandle firstValue, ShapeHandle secondValue) { this(); put(firstValue, secondValue); }
    public ShapeHandlePair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef ShapeHandlePair put(@ByRef ShapeHandlePair x);


    @MemberGetter public native @ByRef ShapeHandle first(); public native ShapeHandlePair first(ShapeHandle first);
    @MemberGetter public native @ByRef ShapeHandle second();  public native ShapeHandlePair second(ShapeHandle second);

    public ShapeHandlePair put(ShapeHandle firstValue, ShapeHandle secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class DimensionHandlePair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DimensionHandlePair(Pointer p) { super(p); }
    public DimensionHandlePair(DimensionHandle firstValue, DimensionHandle secondValue) { this(); put(firstValue, secondValue); }
    public DimensionHandlePair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef DimensionHandlePair put(@ByRef DimensionHandlePair x);


    @MemberGetter public native @ByRef DimensionHandle first(); public native DimensionHandlePair first(DimensionHandle first);
    @MemberGetter public native @ByRef DimensionHandle second();  public native DimensionHandlePair second(DimensionHandle second);

    public DimensionHandlePair put(DimensionHandle firstValue, DimensionHandle secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class EdgeSetBoolPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EdgeSetBoolPair(Pointer p) { super(p); }
    public EdgeSetBoolPair(EdgeSetIterator firstValue, boolean secondValue) { this(); put(firstValue, secondValue); }
    public EdgeSetBoolPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef EdgeSetBoolPair put(@ByRef EdgeSetBoolPair x);


    @MemberGetter public native @ByRef EdgeSetIterator first(); public native EdgeSetBoolPair first(EdgeSetIterator first);
    @MemberGetter public native @Cast("bool") boolean second();  public native EdgeSetBoolPair second(boolean second);

    public EdgeSetBoolPair put(EdgeSetIterator firstValue, boolean secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class StringIntPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringIntPair(Pointer p) { super(p); }
    public StringIntPair(BytePointer firstValue, int secondValue) { this(); put(firstValue, secondValue); }
    public StringIntPair(String firstValue, int secondValue) { this(); put(firstValue, secondValue); }
    public StringIntPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringIntPair put(@ByRef StringIntPair x);


    @MemberGetter public native @StdString BytePointer first(); public native StringIntPair first(BytePointer first);
    @MemberGetter public native int second();  public native StringIntPair second(int second);
    @MemberSetter @Index public native StringIntPair first(@StdString String first);

    public StringIntPair put(BytePointer firstValue, int secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }

    public StringIntPair put(String firstValue, int secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class StringPieceIntPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringPieceIntPair(Pointer p) { super(p); }
    public StringPieceIntPair(BytePointer firstValue, int secondValue) { this(); put(firstValue, secondValue); }
    public StringPieceIntPair(String firstValue, int secondValue) { this(); put(firstValue, secondValue); }
    public StringPieceIntPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringPieceIntPair put(@ByRef StringPieceIntPair x);


    @MemberGetter public native @StringPiece BytePointer first(); public native StringPieceIntPair first(BytePointer first);
    @MemberGetter public native int second();  public native StringPieceIntPair second(int second);
    @MemberSetter @Index public native StringPieceIntPair first(@StringPiece String first);

    public StringPieceIntPair put(BytePointer firstValue, int secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }

    public StringPieceIntPair put(String firstValue, int secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class TensorSlideStringPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorSlideStringPair(Pointer p) { super(p); }
    public TensorSlideStringPair(TensorSlice firstValue, BytePointer secondValue) { this(); put(firstValue, secondValue); }
    public TensorSlideStringPair(TensorSlice firstValue, String secondValue) { this(); put(firstValue, secondValue); }
    public TensorSlideStringPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef TensorSlideStringPair put(@ByRef TensorSlideStringPair x);


    @MemberGetter public native @ByRef TensorSlice first(); public native TensorSlideStringPair first(TensorSlice first);
    @MemberGetter public native @StdString BytePointer second();  public native TensorSlideStringPair second(BytePointer second);
    @MemberSetter @Index public native TensorSlideStringPair second(@StdString String second);

    public TensorSlideStringPair put(TensorSlice firstValue, BytePointer secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }

    public TensorSlideStringPair put(TensorSlice firstValue, String secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@NoOffset @Name("std::pair") public static class NodeIndexPair extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeIndexPair(Pointer p) { super(p); }
    public NodeIndexPair(Node firstValue, int secondValue) { this(); put(firstValue, secondValue); }
    public NodeIndexPair()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef NodeIndexPair put(@ByRef NodeIndexPair x);


    @MemberGetter public native Node first(); public native NodeIndexPair first(Node first);
    @MemberGetter public native @Cast("tensorflow::ImportGraphDefResults::Index") int second();  public native NodeIndexPair second(int second);

    public NodeIndexPair put(Node firstValue, int secondValue) {
        first(firstValue);
        second(secondValue);
        return this;
    }
}

@Name("std::unordered_map") public static class StringSliceInfoMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringSliceInfoMap(Pointer p) { super(p); }
    public StringSliceInfoMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringSliceInfoMap put(@ByRef StringSliceInfoMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index(function = "at") public native @ByRef TensorSliceSet.SliceInfo get(@StdString BytePointer i);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const TensorSliceSet.SliceInfo second();
    }
}

@Name("std::unordered_map") public static class VarToShapeMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public VarToShapeMap(Pointer p) { super(p); }
    public VarToShapeMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef VarToShapeMap put(@ByRef VarToShapeMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @ByRef TensorShape get(@StdString BytePointer i);
    public native VarToShapeMap put(@StdString BytePointer i, TensorShape value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @ByRef @Const TensorShape second();
    }
}
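
/** Usage sketch (editor's illustration, not generated code): iterating an
 *  unordered_map wrapper with its Iterator, e.g. a variable-name-to-shape map
 *  produced by a checkpoint reader. dims() is assumed to be exposed on
 *  TensorShape elsewhere in this file. */
public static void examplePrintVariableRanks(VarToShapeMap map) {
    for (VarToShapeMap.Iterator it = map.begin(); !it.equals(map.end()); it.increment()) {
        System.out.println(it.first().getString() + " rank=" + it.second().dims());
    }
}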

@Name("std::unordered_map") public static class VarToDataTypeMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public VarToDataTypeMap(Pointer p) { super(p); }
    public VarToDataTypeMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef VarToDataTypeMap put(@ByRef VarToDataTypeMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native @Cast("tensorflow::DataType") int get(@StdString BytePointer i);
    public native VarToDataTypeMap put(@StdString BytePointer i, int value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @Cast("tensorflow::DataType") int second();
    }
}

@Name("std::unordered_map") public static class StringTensorSliceSetMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringTensorSliceSetMap(Pointer p) { super(p); }
    public StringTensorSliceSetMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringTensorSliceSetMap put(@ByRef StringTensorSliceSetMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native TensorSliceSet get(@StdString BytePointer i);
    public native StringTensorSliceSetMap put(@StdString BytePointer i, TensorSliceSet value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @Const TensorSliceSet second();
    }
}

@Name("std::unordered_map") public static class StringNodeMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringNodeMap(Pointer p) { super(p); }
    public StringNodeMap()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringNodeMap put(@ByRef StringNodeMap x);

    public boolean empty() { return size() == 0; }
    public native long size();

    @Index public native Node get(@StdString BytePointer i);
    public native StringNodeMap put(@StdString BytePointer i, Node value);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
        public native @Name("operator*().second") @MemberGetter @Const Node second();
    }
}

@Name("std::unordered_set") public static class StringUnorderedSet extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StringUnorderedSet(Pointer p) { super(p); }
    public StringUnorderedSet()       { allocate();  }
    private native void allocate();
    public native @Name("operator=") @ByRef StringUnorderedSet put(@ByRef StringUnorderedSet x);

    public boolean empty() { return size() == 0; }
    public native long size();

    public native void insert(@StdString BytePointer value);
    public native void erase(@StdString BytePointer value);
    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();
    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
        public Iterator(Pointer p) { super(p); }
        public Iterator() { }

        public native @Name("operator++") @ByRef Iterator increment();
        public native @Name("operator==") boolean equals(@ByRef Iterator it);
        public native @Name("operator*") @StdString BytePointer get();
    }
}
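
/** Usage sketch (editor's illustration, not generated code): membership test on
 *  the unordered_set wrapper via a linear scan of its Iterator, since the
 *  bindings above expose insert/erase/begin/end but no find(). */
public static boolean exampleContains(StringUnorderedSet set, String key) {
    for (StringUnorderedSet.Iterator it = set.begin(); !it.equals(set.end()); it.increment()) {
        if (key.equals(it.get().getString())) {
            return true;
        }
    }
    return false;
}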

// Parsed from google/protobuf/arena.h

// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file defines an Arena allocator for better allocation performance.

// #ifndef GOOGLE_PROTOBUF_ARENA_H__
// #define GOOGLE_PROTOBUF_ARENA_H__

// #include <limits>
// #ifdef max
// #undef max  // Visual Studio defines this macro
// #endif
// #if defined(_MSC_VER) && !defined(_LIBCPP_STD_VER) && !_HAS_EXCEPTIONS
// Work around bugs in MSVC <typeinfo> header when _HAS_EXCEPTIONS=0.
// #include <exception>
// #include <typeinfo>

// #else
// #include <typeinfo>
// #endif

// #include 
// #include 
// #include   // defined below

  // namespace protobuf



  // namespace quality_webanswers



  // namespace arena_metrics

@Namespace("google::protobuf::internal") @Opaque public static class ArenaStringPtr extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ArenaStringPtr() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ArenaStringPtr(Pointer p) { super(p); }
}     // defined in arenastring.h
@Namespace("google::protobuf::internal") @Opaque public static class LazyField extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public LazyField() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LazyField(Pointer p) { super(p); }
}           // defined in lazy_field.h

// Templated cleanup methods.
@Namespace("google::protobuf::internal") public static native void arena_free(Pointer object, @Cast("size_t") long size);

  // namespace internal

// ArenaOptions provides optional additional parameters to arena construction
// that control its block-allocation behavior.
@Namespace("google::protobuf") @NoOffset public static class ArenaOptions extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ArenaOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ArenaOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ArenaOptions position(long position) {
        return (ArenaOptions)super.position(position);
    }

  // This defines the size of the first block requested from the system malloc.
  // Subsequent block sizes will increase in a geometric series up to a maximum.
  public native @Cast("size_t") long start_block_size(); public native ArenaOptions start_block_size(long start_block_size);

  // This defines the maximum block size requested from system malloc (unless an
  // individual arena allocation request occurs with a size larger than this
  // maximum). Requested block sizes increase up to this value, then remain
  // here.
  public native @Cast("size_t") long max_block_size(); public native ArenaOptions max_block_size(long max_block_size);

  // An initial block of memory for the arena to use, or NULL for none. If
  // provided, the block must live at least as long as the arena itself. The
  // creator of the Arena retains ownership of the block after the Arena is
  // destroyed.
  public native @Cast("char*") BytePointer initial_block(); public native ArenaOptions initial_block(BytePointer initial_block);

  // The size of the initial block, if provided.
  public native @Cast("size_t") long initial_block_size(); public native ArenaOptions initial_block_size(long initial_block_size);

  // A function pointer to an alloc method that returns memory blocks of size
  // requested. By default, it contains a ptr to the malloc function.
  //
  // NOTE: block_alloc and dealloc functions are expected to behave like
  // malloc and free, including Asan poisoning.
  public static class Block_alloc_long extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    Block_alloc_long(Pointer p) { super(p); }
      protected Block_alloc_long() { allocate(); }
      private native void allocate();
      public native Pointer call(@Cast("size_t") long arg0);
  }
  public native Block_alloc_long block_alloc(); public native ArenaOptions block_alloc(Block_alloc_long block_alloc);
  // A function pointer to a dealloc method that takes ownership of the blocks
  // from the arena. By default, it contains a ptr to a wrapper function that
  // calls free.
  public static class Block_dealloc_Pointer_long extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    Block_dealloc_Pointer_long(Pointer p) { super(p); }
      protected Block_dealloc_Pointer_long() { allocate(); }
      private native void allocate();
      public native void call(Pointer arg0, @Cast("size_t") long arg1);
  }
  public native Block_dealloc_Pointer_long block_dealloc(); public native ArenaOptions block_dealloc(Block_dealloc_Pointer_long block_dealloc);

  public ArenaOptions() { super((Pointer)null); allocate(); }
  private native void allocate();
}
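
/** Usage sketch (editor's illustration, not generated code): tunes the block
 *  sizes documented above before constructing an Arena; the concrete sizes are
 *  arbitrary examples. */
public static Arena exampleArenaWithOptions() {
    ArenaOptions options = new ArenaOptions();
    options.start_block_size(4096);      // first block requested from the allocator
    options.max_block_size(256 * 1024);  // cap for the geometric block growth
    return new Arena(options);
}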

// Support for non-RTTI environments. (The metrics hooks API uses type
// information.)
// #ifndef GOOGLE_PROTOBUF_NO_RTTI
// #define RTTI_TYPE_ID(type) (&typeid(type))
// #else
// #define RTTI_TYPE_ID(type) (NULL)
// #endif

// Arena allocator. Arena allocation replaces ordinary (heap-based) allocation
// with new/delete, and improves performance by aggregating allocations into
// larger blocks and freeing allocations all at once. Protocol messages are
// allocated on an arena by using Arena::CreateMessage(Arena*), below, and
// are automatically freed when the arena is destroyed.
//
// This is a thread-safe implementation: multiple threads may allocate from the
// arena concurrently. Destruction is not thread-safe and the destructing
// thread must synchronize with users of the arena first.
//
// An arena provides two allocation interfaces: CreateMessage, which works
// for arena-enabled proto2 message types as well as other types that satisfy
// the appropriate protocol (described below), and Create, which works for
// any arbitrary type T. CreateMessage is better when the type T supports it,
// because this interface (i) passes the arena pointer to the created object so
// that its sub-objects and internal allocations can use the arena too, and (ii)
// elides the object's destructor call when possible. Create does not place
// any special requirements on the type T, and will invoke the object's
// destructor when the arena is destroyed.
//
// The arena message allocation protocol, required by CreateMessage, is as
// follows:
//
// - The type T must have (at least) two constructors: a constructor with no
//   arguments, called when a T is allocated on the heap; and a constructor with
//   a google::protobuf::Arena* argument, called when a T is allocated on an arena. If the
//   second constructor is called with a NULL arena pointer, it must be
//   equivalent to invoking the first (no-argument) constructor.
//
// - The type T must have a particular type trait: a nested type
//   |InternalArenaConstructable_|. This is usually a typedef to |void|. If no
//   such type trait exists, then the instantiation CreateMessage will fail
//   to compile.
//
// - The type T *may* have the type trait |DestructorSkippable_|. If this type
//   trait is present in the type, then its destructor will not be called if and
//   only if it was passed a non-NULL arena pointer. If this type trait is not
//   present on the type, then its destructor is always called when the
//   containing arena is destroyed.
//
// - One- and two-user-argument forms of CreateMessage() also exist that
//   forward these constructor arguments to T's constructor: for example,
//   CreateMessage(Arena*, arg1, arg2) forwards to a constructor T(Arena*,
//   arg1, arg2).
//
// This protocol is implemented by all arena-enabled proto2 message classes as
// well as RepeatedPtrField.
//
// Do NOT subclass Arena. This class will be marked as final when C++11 is
// enabled.
@Namespace("google::protobuf") @NoOffset public static class Arena extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Arena(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public Arena(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public Arena position(long position) {
        return (Arena)super.position(position);
    }

  // Arena constructor taking custom options. See ArenaOptions below for
  // descriptions of the options available.
  public Arena(@Const @ByRef ArenaOptions options) { super((Pointer)null); allocate(options); }
  private native void allocate(@Const @ByRef ArenaOptions options);

  // Block overhead.  Use this as a guide for how much to over-allocate the
  // initial block if you want an allocation of size N to fit inside it.
  //
  // WARNING: if you allocate multiple objects, it is difficult to guarantee
  // that a series of allocations will fit in the initial block, especially if
  // Arena changes its alignment guarantees in the future!
  @MemberGetter public static native @Cast("const size_t") long kBlockOverhead();
  public static final long kBlockOverhead = kBlockOverhead();

  // Default constructor with sensible default options, tuned for average
  // use-cases.
  public Arena() { super((Pointer)null); allocate(); }
  private native void allocate();

  public native void Init(@Const @ByRef ArenaOptions options);

  // API to create proto2 message objects on the arena. If the arena passed in
  // is NULL, then a heap allocated object is returned. Type T must be a message
  // defined in a .proto file with cc_enable_arenas set to true, otherwise a
  // compilation error will occur.
  //
  // RepeatedField and RepeatedPtrField may also be instantiated directly on an
  // arena with this method.
  //
  // This function also accepts any type T that satisfies the arena message
  // allocation protocol, documented above.

  // API to create any objects on the arena. Note that only the object will
  // be created on the arena; the underlying ptrs (in case of a proto2 message)
  // will be still heap allocated. Proto messages should usually be allocated
  // with CreateMessage() instead.
  //
  // Note that even if T satisfies the arena message construction protocol
  // (InternalArenaConstructable_ trait and optional DestructorSkippable_
  // trait), as described above, this function does not follow the protocol;
  // instead, it treats T as a black-box type, just as if it did not have these
  // traits. Specifically, T's constructor arguments will always be only those
  // passed to Create() -- no additional arena pointer is implicitly added.
  // Furthermore, the destructor will always be called at arena destruction time
  // (unless the destructor is trivial). Hence, from T's point of view, it is as
  // if the object were allocated on the heap (except that the underlying memory
  // is obtained from the arena).

  // Create an array of object type T on the arena *without* invoking the
  // constructor of T. If `arena` is null, then the return value should be freed
  // with `delete[] x;` (or `::operator delete[](x);`).
  // To ensure safe uses, this function checks at compile time
  // (when compiled as C++11) that T is trivially default-constructible and
  // trivially destructible.

  // Returns the total space allocated by the arena, which is the sum of the
  // sizes of the underlying blocks. This method is relatively fast; a counter
  // is kept as blocks are allocated.
  public native @Cast("google::protobuf::uint64") long SpaceAllocated();
  // Returns the total space used by the arena. Similar to SpaceAllocated but
  // does not include free space and block overhead. The total space returned
  // may not include space used by other threads executing concurrently with
  // the call to this method.
  public native @Cast("google::protobuf::uint64") long SpaceUsed();
  // DEPRECATED. Please use SpaceAllocated() and SpaceUsed().
  //
  // Combines SpaceAllocated and SpaceUsed. Returns a pair of
  // <space allocated, space used>.
  public native @Deprecated @ByVal LongLongPair SpaceAllocatedAndUsed();

  // Frees all storage allocated by this arena after calling destructors
  // registered with OwnDestructor() and freeing objects registered with Own().
  // Any objects allocated on this arena are unusable after this call. It also
  // returns the total space used by the arena which is the sums of the sizes
  // of the allocated blocks. This method is not thread-safe.
  public native @Cast("google::protobuf::uint64") long Reset();

  // Adds |object| to a list of heap-allocated objects to be freed with |delete|
  // when the arena is destroyed or reset.

  // Adds |object| to a list of objects whose destructors will be manually
  // called when the arena is destroyed or reset. This differs from Own() in
  // that it does not free the underlying memory with |delete|; hence, it is
  // normally only used for objects that are placement-newed into
  // arena-allocated memory.

  // Adds a custom member function on an object to the list of destructors that
  // will be manually called when the arena is destroyed or reset. This differs
  // from OwnDestructor() in that any member function may be specified, not only
  // the class destructor.
  public static class Destruct_Pointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    Destruct_Pointer(Pointer p) { super(p); }
      protected Destruct_Pointer() { allocate(); }
      private native void allocate();
      public native void call(Pointer arg0);
  }
  public native void OwnCustomDestructor(
        Pointer object, Destruct_Pointer destruct);

  // Retrieves the arena associated with |value| if |value| is an arena-capable
  // message, or NULL otherwise. This differs from value->GetArena() in that the
  // latter is a virtual call, while this method is a templated call that
  // resolves at compile-time.

  // Helper typetraits that indicates support for arenas in a type T at compile
  // time. This is public only to allow construction of higher-level templated
  // utilities.
  //
  // is_arena_constructable::value is true if the message type T has arena
  // support enabled, and false otherwise.
  //
  // is_destructor_skippable::value is true if the message type T has told
  // the arena that it is safe to skip the destructor, and false otherwise.
  //
  // This is inside Arena because only Arena has the friend relationships
  // necessary to see the underlying generated code traits.
}
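
/** Usage sketch (editor's illustration, not generated code): the accounting and
 *  reset calls declared above, driven from Java. Reset() invalidates every
 *  object allocated on the arena, as documented. */
public static long exampleResetArena(Arena arena) {
    long allocated = arena.SpaceAllocated();  // sum of the underlying block sizes
    arena.Reset();                            // frees all storage owned by the arena
    return allocated;
}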

// Defined above for supporting environments without RTTI.
// #undef RTTI_TYPE_ID

  // namespace protobuf

  // namespace google
// #endif  // GOOGLE_PROTOBUF_ARENA_H__


// Parsed from google/protobuf/message_lite.h

// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Authors: [email protected] (Wink Saville),
//          [email protected] (Kenton Varda)
//  Based on original Protocol Buffers design by
//  Sanjay Ghemawat, Jeff Dean, and others.
//
// Defines MessageLite, the abstract interface implemented by all (lite
// and non-lite) protocol message objects.

// #ifndef GOOGLE_PROTOBUF_MESSAGE_LITE_H__
// #define GOOGLE_PROTOBUF_MESSAGE_LITE_H__

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
@Namespace("google::protobuf::io") @Opaque public static class CodedInputStream extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public CodedInputStream() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CodedInputStream(Pointer p) { super(p); }
}
@Namespace("google::protobuf::io") @Opaque public static class CodedOutputStream extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public CodedOutputStream() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CodedOutputStream(Pointer p) { super(p); }
}
@Namespace("google::protobuf::io") @Opaque public static class ZeroCopyInputStream extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ZeroCopyInputStream() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ZeroCopyInputStream(Pointer p) { super(p); }
}
@Namespace("google::protobuf::io") @Opaque public static class ZeroCopyOutputStream extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ZeroCopyOutputStream() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ZeroCopyOutputStream(Pointer p) { super(p); }
}


@Namespace("google::protobuf::internal") @Opaque public static class RepeatedPtrFieldBase extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public RepeatedPtrFieldBase() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RepeatedPtrFieldBase(Pointer p) { super(p); }
}
@Namespace("google::protobuf::internal") @Opaque public static class WireFormatLite extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public WireFormatLite() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public WireFormatLite(Pointer p) { super(p); }
}
@Namespace("google::protobuf::internal") @Opaque public static class WeakFieldMap extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public WeakFieldMap() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public WeakFieldMap(Pointer p) { super(p); }
}

// #ifndef SWIG
// #endif  // SWIG
  // namespace internal

// Interface to light weight protocol messages.
//
// This interface is implemented by all protocol message objects.  Non-lite
// messages additionally implement the Message interface, which is a
// subclass of MessageLite.  Use MessageLite instead when you only need
// the subset of features which it supports -- namely, nothing that uses
// descriptors or reflection.  You can instruct the protocol compiler
// to generate classes which implement only MessageLite, not the full
// Message interface, by adding the following line to the .proto file:
//
//   option optimize_for = LITE_RUNTIME;
//
// This is particularly useful on resource-constrained systems where
// the full protocol buffers runtime library is too big.
//
// Note that on non-constrained systems (e.g. servers) when you need
// to link in lots of protocol definitions, a better way to reduce
// total code footprint is to use optimize_for = CODE_SIZE.  This
// will make the generated code smaller while still supporting all the
// same features (at the expense of speed).  optimize_for = LITE_RUNTIME
// is best when you only have a small number of message types linked
// into your binary, in which case the size of the protocol buffers
// runtime itself is the biggest problem.
@Namespace("google::protobuf") public static class MessageLite extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MessageLite(Pointer p) { super(p); }


  // Basic Operations ------------------------------------------------

  // Get the name of this message type, e.g. "foo.bar.BazProto".
  public native @StdString BytePointer GetTypeName();

  // Construct a new instance of the same type.  Ownership is passed to the
  // caller.
  public native MessageLite New();

  // Construct a new instance on the arena. Ownership is passed to the caller
  // if arena is NULL. Default implementation for backwards compatibility.
  public native MessageLite New(Arena arena);

  // Get the arena, if any, associated with this message. Virtual method
  // required for generic operations but most arena-related operations should
  // use the GetArenaNoVirtual() generated-code method. Default implementation
  // to reduce code size by avoiding the need for per-type implementations
  // when types do not implement arena support.
  public native Arena GetArena();

  // Get a pointer that may be equal to this message's arena, or may not be.
  // If the value returned by this method is equal to some arena pointer, then
  // this message is on that arena; however, if this message is on some arena,
  // this method may or may not return that arena's pointer. As a tradeoff,
  // this method may be more efficient than GetArena(). The intent is to allow
  // underlying representations that use e.g. tagged pointers to sometimes
  // store the arena pointer directly, and sometimes in a more indirect way,
  // and allow a fastpath comparison against the arena pointer when it's easy
  // to obtain.
  public native Pointer GetMaybeArenaPointer();

  // Clear all fields of the message and set them to their default values.
  // Clear() avoids freeing memory, assuming that any memory allocated
  // to hold parts of the message will be needed again to hold the next
  // message.  If you actually want to free the memory used by a Message,
  // you must delete it.
  public native void Clear();

  // Quickly check if all required fields have values set.
  public native @Cast("bool") boolean IsInitialized();

  // This is not implemented for Lite messages -- it just returns "(cannot
  // determine missing fields for lite message)".  However, it is implemented
  // for full messages.  See message.h.
  public native @StdString BytePointer InitializationErrorString();

  // If |other| is the exact same class as this, calls MergeFrom(). Otherwise,
  // results are undefined (probably crash).
  public native void CheckTypeAndMergeFrom(@Const @ByRef MessageLite other);

  // Parsing ---------------------------------------------------------
  // Methods for parsing in protocol buffer format.  Most of these are
  // just simple wrappers around MergeFromCodedStream().  Clear() will be
  // called before merging the input.

  // Fill the message with a protocol buffer parsed from the given input
  // stream. Returns false on a read error or if the input is in the wrong
  // format.  A successful return does not indicate that the entire input was
  // consumed; call ConsumedEntireMessage() to check that, if applicable.
  public native @Cast("bool") boolean ParseFromCodedStream(CodedInputStream input);
  // Like ParseFromCodedStream(), but accepts messages that are missing
  // required fields.
  public native @Cast("bool") boolean ParsePartialFromCodedStream(CodedInputStream input);
  // Read a protocol buffer from the given zero-copy input stream.  If
  // successful, the entire input will be consumed.
  public native @Cast("bool") boolean ParseFromZeroCopyStream(ZeroCopyInputStream input);
  // Like ParseFromZeroCopyStream(), but accepts messages that are missing
  // required fields.
  public native @Cast("bool") boolean ParsePartialFromZeroCopyStream(ZeroCopyInputStream input);
  // Read a protocol buffer from the given zero-copy input stream, expecting
  // the message to be exactly "size" bytes long.  If successful, exactly
  // this many bytes will have been consumed from the input.
  public native @Cast("bool") boolean ParseFromBoundedZeroCopyStream(ZeroCopyInputStream input, int size);
  // Like ParseFromBoundedZeroCopyStream(), but accepts messages that are
  // missing required fields.
  public native @Cast("bool") boolean ParsePartialFromBoundedZeroCopyStream(ZeroCopyInputStream input,
                                               int size);
  // Parses a protocol buffer contained in a string. Returns true on success.
  // This function takes a string in the (non-human-readable) binary wire
  // format, matching the encoding output by MessageLite::SerializeToString().
  // If you'd like to convert a human-readable string into a protocol buffer
  // object, see google::protobuf::TextFormat::ParseFromString().
  public native @Cast("bool") boolean ParseFromString(@StdString BytePointer data);
  public native @Cast("bool") boolean ParseFromString(@StdString String data);
  // Like ParseFromString(), but accepts messages that are missing
  // required fields.
  public native @Cast("bool") boolean ParsePartialFromString(@StdString BytePointer data);
  public native @Cast("bool") boolean ParsePartialFromString(@StdString String data);
  // Parse a protocol buffer contained in an array of bytes.
  public native @Cast("bool") boolean ParseFromArray(@Const Pointer data, int size);
  // Like ParseFromArray(), but accepts messages that are missing
  // required fields.
  public native @Cast("bool") boolean ParsePartialFromArray(@Const Pointer data, int size);


  // Reads a protocol buffer from the stream and merges it into this
  // Message.  Singular fields read from the input overwrite what is
  // already in the Message, and repeated fields are appended to those
  // already present.
  //
  // It is the responsibility of the caller to call input->LastTagWas()
  // (for groups) or input->ConsumedEntireMessage() (for non-groups) after
  // this returns to verify that the message's end was delimited correctly.
  //
  // ParseFromCodedStream() is implemented as Clear() followed by
  // MergeFromCodedStream().
  public native @Cast("bool") boolean MergeFromCodedStream(CodedInputStream input);

  // Like MergeFromCodedStream(), but succeeds even if required fields are
  // missing in the input.
  //
  // MergeFromCodedStream() is just implemented as MergePartialFromCodedStream()
  // followed by IsInitialized().
  public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);


  // Serialization ---------------------------------------------------
  // Methods for serializing in protocol buffer format.  Most of these
  // are just simple wrappers around ByteSize() and SerializeWithCachedSizes().

  // Write a protocol buffer of this message to the given output.  Returns
  // false on a write error.  If the message is missing required fields,
  // this may GOOGLE_CHECK-fail.
  public native @Cast("bool") boolean SerializeToCodedStream(CodedOutputStream output);
  // Like SerializeToCodedStream(), but allows missing required fields.
  public native @Cast("bool") boolean SerializePartialToCodedStream(CodedOutputStream output);
  // Write the message to the given zero-copy output stream.  All required
  // fields must be set.
  public native @Cast("bool") boolean SerializeToZeroCopyStream(ZeroCopyOutputStream output);
  // Like SerializeToZeroCopyStream(), but allows missing required fields.
  public native @Cast("bool") boolean SerializePartialToZeroCopyStream(ZeroCopyOutputStream output);
  // Serialize the message and store it in the given string.  All required
  // fields must be set.
  public native @Cast("bool") boolean SerializeToString(@StdString @Cast({"char*", "std::string*"}) BytePointer output);
  // Like SerializeToString(), but allows missing required fields.
  public native @Cast("bool") boolean SerializePartialToString(@StdString @Cast({"char*", "std::string*"}) BytePointer output);
  // Serialize the message and store it in the given byte array.  All required
  // fields must be set.
  public native @Cast("bool") boolean SerializeToArray(Pointer data, int size);
  // Like SerializeToArray(), but allows missing required fields.
  public native @Cast("bool") boolean SerializePartialToArray(Pointer data, int size);

  // Make a string encoding of the message.  Equivalent to calling
  // SerializeToString() on a string and using the result.  Returns the empty
  // string if SerializeToString() would have returned an error.
  // Note: If you intend to generate many such strings, you may
  // reduce heap fragmentation by re-using the same string
  // object across calls to SerializeToString().
  public native @StdString BytePointer SerializeAsString();
  // Like SerializeAsString(), but allows missing required fields.
  public native @StdString BytePointer SerializePartialAsString();

  // Like SerializeToString(), but appends the data to the string's existing
  // contents.  All required fields must be set.
  public native @Cast("bool") boolean AppendToString(@StdString @Cast({"char*", "std::string*"}) BytePointer output);
  // Like AppendToString(), but allows missing required fields.
  public native @Cast("bool") boolean AppendPartialToString(@StdString @Cast({"char*", "std::string*"}) BytePointer output);

  // Computes the serialized size of the message.  This recursively calls
  // ByteSizeLong() on all embedded messages.
  //
  // ByteSizeLong() is generally linear in the number of fields defined for the
  // proto.
  public native @Cast("size_t") long ByteSizeLong();

  // Legacy ByteSize() API.
  public native @Deprecated int ByteSize();

  // Serializes the message without recomputing the size.  The message must not
  // have changed since the last call to ByteSize(), and the value returned by
  // ByteSize must be non-negative.  Otherwise the results are undefined.
  public native void SerializeWithCachedSizes(
        CodedOutputStream output);

  // Functions below here are not part of the public interface.  It isn't
  // enforced, but they should be treated as private, and will be private
  // at some future time.  Unfortunately the implementation of the "friend"
  // keyword in GCC is broken at the moment, but we expect it will be fixed.

  // Like SerializeWithCachedSizes, but writes directly to *target, returning
  // a pointer to the byte immediately after the last byte written.  "target"
  // must point at a byte array of at least ByteSize() bytes.  Whether to use
  // deterministic serialization, e.g., maps in sorted order, is determined by
  // CodedOutputStream::IsDefaultSerializationDeterministic().
  public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer target);
  public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer target);
  public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] target);

  // Returns the result of the last call to ByteSize().  An embedded message's
  // size is needed both to serialize it (because embedded messages are
  // length-delimited) and to compute the outer message's size.  Caching
  // the size avoids computing it multiple times.
  //
  // ByteSize() does not automatically use the cached size when available
  // because this would require invalidating it every time the message was
  // modified, which would be too hard and expensive.  (E.g. if a deeply-nested
  // sub-message is changed, all of its parents' cached sizes would need to be
  // invalidated, which is too much work for an otherwise inlined setter
  // method.)
  public native int GetCachedSize();

  public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic,
                                                           @Cast("google::protobuf::uint8*") BytePointer target);
  public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic,
                                                           @Cast("google::protobuf::uint8*") ByteBuffer target);
  public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic,
                                                           @Cast("google::protobuf::uint8*") byte[] target);
}
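
// Illustrative sketch, not part of the generated bindings: a round trip
// through the parsing and serialization wrappers above. Assumes "msg" is any
// concrete MessageLite subclass exposed by these bindings and "wire" holds a
// message in binary wire format. ParseFromArray() takes an explicit size, so
// wire data containing zero bytes is handled correctly.
public static boolean exampleParseAndReserialize(MessageLite msg, byte[] wire) {
    BytePointer in = new BytePointer(wire);       // copy the input into native memory
    if (!msg.ParseFromArray(in, wire.length)) {   // false on malformed input
        return false;
    }
    long size = msg.ByteSizeLong();               // serialized size of the message
    BytePointer out = new BytePointer(size);      // allocate the output buffer
    return msg.SerializeToArray(out, (int) size); // may GOOGLE_CHECK-fail if required
                                                  // fields are unset; use
                                                  // SerializePartialToArray() to allow that
}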



// DO NOT USE: For migration only. Will be removed when Proto3 defaults to
// preserve unknowns.
@Namespace("google::protobuf::internal") public static native @Cast("bool") boolean GetProto3PreserveUnknownsDefault();

// DO NOT USE: For migration only. Will be removed when Proto3 defaults to
// preserve unknowns.
@Namespace("google::protobuf::internal") public static native void SetProto3PreserveUnknownsDefault(@Cast("bool") boolean preserve);
  // namespace internal


  // namespace protobuf

  // namespace google
// #endif  // GOOGLE_PROTOBUF_MESSAGE_LITE_H__


// Parsed from google/protobuf/unknown_field_set.h

// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Author: [email protected] (Kenton Varda)
//  Based on original Protocol Buffers design by
//  Sanjay Ghemawat, Jeff Dean, and others.
//
// Contains classes used to keep track of unrecognized fields seen while
// parsing a protocol message.

// #ifndef GOOGLE_PROTOBUF_UNKNOWN_FIELD_SET_H__
// #define GOOGLE_PROTOBUF_UNKNOWN_FIELD_SET_H__

// #include 
// #include 
// #include 
// #include 
// #include 
// #include          // coded_stream.h      // zero_copy_stream.h
  
    @Namespace("google::protobuf::internal") @Opaque public static class InternalMetadataWithArena extends Pointer {
        /** Empty constructor. Calls {@code super((Pointer)null)}. */
        public InternalMetadataWithArena() { super((Pointer)null); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public InternalMetadataWithArena(Pointer p) { super(p); }
    }  // metadata.h
    @Namespace("google::protobuf::internal") @Opaque public static class WireFormat extends Pointer {
        /** Empty constructor. Calls {@code super((Pointer)null)}. */
        public WireFormat() { super((Pointer)null); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public WireFormat(Pointer p) { super(p); }
    }               // wire_format.h
    @Namespace("google::protobuf::internal") @Opaque public static class MessageSetFieldSkipperUsingCord extends Pointer {
        /** Empty constructor. Calls {@code super((Pointer)null)}. */
        public MessageSetFieldSkipperUsingCord() { super((Pointer)null); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public MessageSetFieldSkipperUsingCord(Pointer p) { super(p); }
    }
                                    // extension_set_heavy.cc
                        // message.h                 // below

// An UnknownFieldSet contains fields that were encountered while parsing a
// message but were not defined by its type.  Keeping track of these can be
// useful, especially in that they may be written if the message is serialized
// again without being cleared in between.  This means that software which
// simply receives messages and forwards them to other servers does not need
// to be updated every time a new field is added to the message definition.
//
// To get the UnknownFieldSet attached to any message, call
// Reflection::GetUnknownFields().
//
// This class is necessarily tied to the protocol buffer wire format, unlike
// the Reflection interface which is independent of any serialization scheme.
@Namespace("google::protobuf") @NoOffset public static class UnknownFieldSet extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnknownFieldSet(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public UnknownFieldSet(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public UnknownFieldSet position(long position) {
        return (UnknownFieldSet)super.position(position);
    }

  public UnknownFieldSet() { super((Pointer)null); allocate(); }
  private native void allocate();

  // Remove all fields.
  public native void Clear();

  // Remove all fields and deallocate internal data objects
  public native void ClearAndFreeMemory();

  // Is this set empty?
  public native @Cast("bool") boolean empty();

  // Merge the contents of some other UnknownFieldSet with this one.
  public native void MergeFrom(@Const @ByRef UnknownFieldSet other);

  // Similar to above, but this function will destroy the contents of other.
  public native void MergeFromAndDestroy(UnknownFieldSet other);

  // Merge the contents of an UnknownFieldSet with the UnknownFieldSet in
  // *metadata, if there is one.  If *metadata doesn't have an UnknownFieldSet,
  // then add one to it and make it a copy of the first arg.
  public static native void MergeToInternalMetdata(
        @Const @ByRef UnknownFieldSet other,
        InternalMetadataWithArena metadata);

  // Swaps the contents of some other UnknownFieldSet with this one.
  public native void Swap(UnknownFieldSet x);

  // Computes (an estimate of) the total number of bytes currently used for
  // storing the unknown fields in memory. Does NOT include
  // sizeof(*this) in the calculation.
  public native @Cast("size_t") long SpaceUsedExcludingSelfLong();

  public native int SpaceUsedExcludingSelf();

  // Version of SpaceUsed() including sizeof(*this).
  public native @Cast("size_t") long SpaceUsedLong();

  public native int SpaceUsed();

  // Returns the number of fields present in the UnknownFieldSet.
  public native int field_count();
  // Get a field in the set, where 0 <= index < field_count().  The fields
  // appear in the order in which they were added.
  public native @Const @ByRef UnknownField field(int index);
  // Get a mutable pointer to a field in the set, where
  // 0 <= index < field_count().  The fields appear in the order in which
  // they were added.
  public native UnknownField mutable_field(int index);

  // Adding fields ---------------------------------------------------

  public native void AddVarint(int number, @Cast("google::protobuf::uint64") long value);
  public native void AddFixed32(int number, @Cast("google::protobuf::uint32") int value);
  public native void AddFixed64(int number, @Cast("google::protobuf::uint64") long value);
  public native void AddLengthDelimited(int number, @StdString BytePointer value);
  public native void AddLengthDelimited(int number, @StdString String value);
  public native @StdString @Cast({"char*", "std::string*"}) BytePointer AddLengthDelimited(int number);
  public native UnknownFieldSet AddGroup(int number);

  // Adds an unknown field from another set.
  public native void AddField(@Const @ByRef UnknownField field);

  // Delete fields with indices in the range [start .. start+num-1].
  // Caution: implementation moves all fields with indices [start+num .. ].
  public native void DeleteSubrange(int start, int num);

  // Delete all fields with a specific field number. The order of the remaining
  // fields is preserved.
  // Caution: implementation moves all fields after the first deleted field.
  public native void DeleteByNumber(int number);

  // Parsing helpers -------------------------------------------------
  // These work exactly like the similarly-named methods of Message.

  public native @Cast("bool") boolean MergeFromCodedStream(CodedInputStream input);
  public native @Cast("bool") boolean ParseFromCodedStream(CodedInputStream input);
  public native @Cast("bool") boolean ParseFromZeroCopyStream(ZeroCopyInputStream input);
  public native @Cast("bool") boolean ParseFromArray(@Const Pointer data, int size);
  public native @Cast("bool") boolean ParseFromString(@StdString BytePointer data);
  public native @Cast("bool") boolean ParseFromString(@StdString String data);

  public static native @Const UnknownFieldSet default_instance();
}
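
// Illustrative sketch, not part of the generated bindings: recording unknown
// fields by hand and iterating over them. Field numbers and values here are
// arbitrary.
public static void exampleUnknownFields() {
    UnknownFieldSet set = new UnknownFieldSet();
    set.AddVarint(1, 42L);                         // field 1: varint 42
    set.AddLengthDelimited(2, "payload");          // field 2: raw bytes
    for (int i = 0; i < set.field_count(); i++) {  // fields keep insertion order
        UnknownField f = set.field(i);
        System.out.println(f.number() + " type=" + f.type());
    }
    set.ClearAndFreeMemory();                      // drop fields and internal buffers
}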

// Represents one field in an UnknownFieldSet.
@Namespace("google::protobuf") public static class UnknownField extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public UnknownField() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public UnknownField(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnknownField(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public UnknownField position(long position) {
        return (UnknownField)super.position(position);
    }

  /** enum google::protobuf::UnknownField::Type */
  public static final int
    TYPE_VARINT = 0,
    TYPE_FIXED32 = 1,
    TYPE_FIXED64 = 2,
    TYPE_LENGTH_DELIMITED = 3,
    TYPE_GROUP = 4;

  // The field's field number, as seen on the wire.
  public native int number();

  // The field type.
  public native @Cast("google::protobuf::UnknownField::Type") int type();

  // Accessors -------------------------------------------------------
  // Each method works only for UnknownFields of the corresponding type.

  public native @Cast("google::protobuf::uint64") long varint();
  public native @Cast("google::protobuf::uint32") int fixed32();
  public native @Cast("google::protobuf::uint64") long fixed64();
  public native @StdString BytePointer length_delimited();
  public native @Const @ByRef UnknownFieldSet group();

  public native void set_varint(@Cast("google::protobuf::uint64") long value);
  public native void set_fixed32(@Cast("google::protobuf::uint32") int value);
  public native void set_fixed64(@Cast("google::protobuf::uint64") long value);
  public native void set_length_delimited(@StdString BytePointer value);
  public native void set_length_delimited(@StdString String value);
  public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_length_delimited();
  public native UnknownFieldSet mutable_group();

  // Serialization API.
  // These methods can take advantage of the underlying implementation and may
  // achieve better performance than retrieving the data with getters and
  // doing the serialization yourself.
  public native void SerializeLengthDelimitedNoTag(CodedOutputStream output);
  public native @Cast("google::protobuf::uint8*") BytePointer SerializeLengthDelimitedNoTagToArray(@Cast("google::protobuf::uint8*") BytePointer target);
  public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeLengthDelimitedNoTagToArray(@Cast("google::protobuf::uint8*") ByteBuffer target);
  public native @Cast("google::protobuf::uint8*") byte[] SerializeLengthDelimitedNoTagToArray(@Cast("google::protobuf::uint8*") byte[] target);

  public native @Cast("size_t") long GetLengthDelimitedSize();


  // If this UnknownField contains a pointer, delete it.
  public native void Delete();

  // Reset all the underlying pointers to NULL. A special function to be used
  // only while merging from a temporary UFS.
  public native void Reset();

  // Make a deep copy of any pointers in this UnknownField.
  public native void DeepCopy(@Const @ByRef UnknownField other);

  // Set the wire type of this UnknownField. Should only be used when this
  // UnknownField is being created.
  public native void SetType(@Cast("google::protobuf::UnknownField::Type") int type);

  public static class LengthDelimited extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public LengthDelimited() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public LengthDelimited(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public LengthDelimited(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public LengthDelimited position(long position) {
          return (LengthDelimited)super.position(position);
      }
  
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer string_value_(); public native LengthDelimited string_value_(BytePointer string_value_);
  }

  public native @Cast("google::protobuf::uint32") int number_(); public native UnknownField number_(int number_);
  public native @Cast("google::protobuf::uint32") int type_(); public native UnknownField type_(int type_);
    @Name("data_.varint_") public native @Cast("google::protobuf::uint64") long data__varint_(); public native UnknownField data__varint_(long data__varint_);
    @Name("data_.fixed32_") public native @Cast("google::protobuf::uint32") int data__fixed32_(); public native UnknownField data__fixed32_(int data__fixed32_);
    @Name("data_.fixed64_") public native @Cast("google::protobuf::uint64") long data__fixed64_(); public native UnknownField data__fixed64_(long data__fixed64_);
    @Name("data_.length_delimited_") public native @ByRef LengthDelimited data__length_delimited_(); public native UnknownField data__length_delimited_(LengthDelimited data__length_delimited_);
    @Name("data_.group_") public native UnknownFieldSet data__group_(); public native UnknownField data__group_(UnknownFieldSet data__group_);
}

// ===================================================================
// inline implementations

  // namespace protobuf

  // namespace google
// #endif  // GOOGLE_PROTOBUF_UNKNOWN_FIELD_SET_H__


// Parsed from tensorflow/core/platform/default/integral_types.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
// #define TENSORFLOW_CORE_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_

// IWYU pragma: private, include "third_party/tensorflow/core/platform/types.h"
// IWYU pragma: friend third_party/tensorflow/core/platform/types.h

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_


// Parsed from tensorflow/core/lib/bfloat16/bfloat16.h

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_LIB_BFLOAT16_BFLOAT16_H_
// #define TENSORFLOW_CORE_LIB_BFLOAT16_BFLOAT16_H_

// #include 
// #include 

// #include "tensorflow/core/platform/byte_order.h"

// #ifdef __CUDACC__
// All functions callable from CUDA code must be qualified with __device__
// #define B16_DEVICE_FUNC __host__ __device__

// #else
// #define B16_DEVICE_FUNC

// #endif
@Name("Eigen::half") @Opaque public static class ShortPointer extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ShortPointer() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShortPointer(Pointer p) { super(p); }
}


// Single precision complex.
// Double precision complex.

// see framework/bfloat16.h for description.
@Namespace("tensorflow") @NoOffset public static class bfloat16 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public bfloat16(Pointer p) { super(p); }

  // The default constructor must yield a zero value, not an uninitialized
  // value; some TF kernels use T() as a zero value.
  public bfloat16() { super((Pointer)null); allocate(); }
  private native void allocate();

  public static native @ByVal bfloat16 truncate_to_bfloat16(float v);

  public bfloat16(float v) { super((Pointer)null); allocate(v); }
  private native void allocate(float v);

  public bfloat16(double val) { super((Pointer)null); allocate(val); }
  private native void allocate(double val);
  // Following the convention of numpy, converting between complex and
  // float will lead to loss of the imaginary part.
  public bfloat16(@Cast("const tensorflow::complex64*") @ByRef FloatPointer val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const tensorflow::complex64*") @ByRef FloatPointer val);
  public bfloat16(@Cast("const tensorflow::complex64*") @ByRef FloatBuffer val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const tensorflow::complex64*") @ByRef FloatBuffer val);
  public bfloat16(@Cast("const tensorflow::complex64*") @ByRef float... val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const tensorflow::complex64*") @ByRef float... val);

  public bfloat16(@Cast("const tensorflow::complex128*") @ByRef DoublePointer val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const tensorflow::complex128*") @ByRef DoublePointer val);
  public bfloat16(@Cast("const tensorflow::complex128*") @ByRef DoubleBuffer val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const tensorflow::complex128*") @ByRef DoubleBuffer val);
  public bfloat16(@Cast("const tensorflow::complex128*") @ByRef double[] val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const tensorflow::complex128*") @ByRef double[] val);

  public bfloat16(@Cast("const unsigned short") short val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const unsigned short") short val);

  public bfloat16(@Cast("const unsigned int") int val) { super((Pointer)null); allocate(val); }
  private native void allocate(@Cast("const unsigned int") int val);

  public bfloat16(long val) { super((Pointer)null); allocate(val); }
  private native void allocate(long val);

  public native @Name("operator float") float asFloat();

  public native @Cast("bool") @Name("operator bool") boolean asBoolean();

  public native @Cast("Eigen::half") @Name("operator Eigen::half") short asShort();

  public native @Name("operator int") int asInt();

  public native @Name("operator long") long asLong();

  public native @Cast("char") @Name("operator char") byte asByte();

  public native @Name("operator double") double asDouble();

  public native @ByVal @Cast("tensorflow::complex64*") @Name("operator tensorflow::complex64") FloatPointer asFloatPointer();

  public native @ByVal @Cast("tensorflow::complex128*") @Name("operator tensorflow::complex128") DoublePointer asDoublePointer();

  public static class FP32 extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public FP32() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public FP32(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public FP32(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public FP32 position(long position) {
          return (FP32)super.position(position);
      }
  
    public native @Cast("unsigned int") int u(); public native FP32 u(int u);
    public native float f(); public native FP32 f(float f);
  }

  // Converts a floating-point number to bfloat16, with round-to-nearest-even
  // as the rounding method.
  // TODO: There is a slightly faster implementation (8% faster on CPU)
  // than this (documented in cl/175987786), that is exponentially harder to
  // understand and document. Switch to the faster version when converting to
  // BF16 becomes compute-bound.
  public static native @ByVal bfloat16 round_to_bfloat16(float v);

  public static native @ByVal bfloat16 epsilon();

  public static native @ByVal bfloat16 highest();

  public static native @ByVal bfloat16 lowest();

  public native @Cast("uint16_t") short value(); public native bfloat16 value(short value);

  // A value that represents "not a number".
  @MemberGetter public static native @Cast("const uint16_t") short NAN_VALUE();
  public static final short NAN_VALUE = NAN_VALUE();
}

@Namespace("tensorflow") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os,
                                                @Const @ByRef bfloat16 dt);

@Namespace("tensorflow") public static native @ByVal @Name("operator +") bfloat16 add(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByVal @Name("operator +") bfloat16 add(@ByVal bfloat16 a, int b);
@Namespace("tensorflow") public static native @ByVal @Name("operator +") bfloat16 add(int a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByVal @Name("operator -") bfloat16 subtract(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByVal @Name("operator *") bfloat16 multiply(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByVal @Name("operator /") bfloat16 divide(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByVal @Name("operator -") bfloat16 subtract(@ByVal bfloat16 a);
@Namespace("tensorflow") public static native @Cast("bool") @Name("operator <") boolean lessThan(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @Cast("bool") @Name("operator ==") boolean equals(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@ByVal bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByRef @Name("operator +=") bfloat16 addPut(@ByRef bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByRef @Name("operator -=") bfloat16 subtractPut(@ByRef bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByVal @Name("operator ++") bfloat16 increment(@ByRef bfloat16 a);
@Namespace("tensorflow") public static native @ByVal @Name("operator --") bfloat16 decrement(@ByRef bfloat16 a);
@Namespace("tensorflow") public static native @ByVal @Name("operator ++") bfloat16 increment(@ByRef bfloat16 a, int arg1);
@Namespace("tensorflow") public static native @ByVal @Name("operator --") bfloat16 decrement(@ByRef bfloat16 a, int arg1);
@Namespace("tensorflow") public static native @ByRef @Name("operator *=") bfloat16 multiplyPut(@ByRef bfloat16 a, @ByVal bfloat16 b);
@Namespace("tensorflow") public static native @ByRef @Name("operator /=") bfloat16 dividePut(@ByRef bfloat16 a, @ByVal bfloat16 b);
  // end namespace tensorflow
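
// Illustrative sketch, not part of the generated bindings: bfloat16
// conversions and the operator overloads exposed above. 1.5f, 2.0f, and their
// sum 3.5f are all exactly representable in bfloat16.
public static void exampleBfloat16() {
    bfloat16 a = bfloat16.round_to_bfloat16(1.5f); // round-to-nearest-even
    bfloat16 b = new bfloat16(2.0f);
    bfloat16 sum = add(a, b);                      // operator+ mapped to add()
    System.out.println(sum.asFloat());             // prints 3.5
    System.out.println(isnan(divide(subtract(a, a), subtract(b, b)))); // 0/0 is NaN: true
}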
@Name("std::hash") public static class HalfHash extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public HalfHash() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public HalfHash(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public HalfHash(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public HalfHash position(long position) {
        return (HalfHash)super.position(position);
    }

  public native @Cast("std::size_t") @Name("operator ()") long apply(@Const @ByRef bfloat16 v);
}
@Namespace("std") public static native @Cast("bool") boolean isinf(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @Cast("bool") boolean isnan(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @Cast("bool") boolean isfinite(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 abs(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 exp(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 log(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 log10(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 sqrt(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 pow(@Const @ByRef bfloat16 a, @Const @ByRef bfloat16 b);
@Namespace("std") public static native @ByVal bfloat16 sin(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 cos(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 tan(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 tanh(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 floor(@Const @ByRef bfloat16 a);
@Namespace("std") public static native @ByVal bfloat16 ceil(@Const @ByRef bfloat16 a);
  // namespace std

// #endif  // TENSORFLOW_CORE_LIB_BFLOAT16_BFLOAT16_H_


// Parsed from tensorflow/core/framework/numeric_types.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_NUMERIC_TYPES_H_
// #define TENSORFLOW_CORE_FRAMEWORK_NUMERIC_TYPES_H_

// #include 
// #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// Disable clang-format to prevent 'FixedPoint' header from being included
// before 'Tensor' header on which it depends.
// clang-format off
// #include "third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint"
// clang-format on

// #include "tensorflow/core/lib/bfloat16/bfloat16.h"
// #include "tensorflow/core/platform/types.h"

// Single precision complex.
// Double precision complex.

// We use Eigen's QInt implementations for our quantized int types.

  // namespace tensorflow




public static native @ByVal bfloat16 FloatToBFloat16(float float_val);
// TODO(xpan): We probably need to override more methods to get correct Eigen
// behavior. E.g. epsilon(), dummy_precision, etc. See NumTraits.h in Eigen.
@Name("Eigen::NumTraits") public static class bfloat16NumTraits extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public bfloat16NumTraits() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public bfloat16NumTraits(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public bfloat16NumTraits(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public bfloat16NumTraits position(long position) {
        return (bfloat16NumTraits)super.position(position);
    }

  /** enum Eigen::NumTraits:: */
  public static final int
    IsInteger = 0,
    IsSigned = 1,
    RequireInitialization = 0;
  public static native @ByVal bfloat16 highest();

  public static native @ByVal bfloat16 lowest();

  public static native @ByVal bfloat16 infinity();

  public static native @ByVal bfloat16 quiet_NaN();
}

  // namespace numext
  // namespace Eigen

// #if defined(_MSC_VER) && !defined(__clang__)
  // namespace std
// #endif  // _MSC_VER

// #endif  // TENSORFLOW_CORE_FRAMEWORK_NUMERIC_TYPES_H_


// Parsed from tensorflow/core/platform/init_main.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_INIT_MAIN_H_
// #define TENSORFLOW_CORE_PLATFORM_INIT_MAIN_H_

// Platform-specific initialization routine that may be invoked by a
// main() program that uses TensorFlow.
//
// Default implementation does nothing.
@Namespace("tensorflow::port") public static native void InitMain(@Cast("const char*") BytePointer usage, IntPointer argc, @Cast("char***") @ByPtrPtr PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(String usage, IntBuffer argc, @Cast("char***") @ByPtrPtr PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(@Cast("const char*") BytePointer usage, int[] argc, @Cast("char***") @ByPtrPtr PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(String usage, IntPointer argc, @Cast("char***") @ByPtrPtr PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(@Cast("const char*") BytePointer usage, IntBuffer argc, @Cast("char***") @ByPtrPtr PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(String usage, int[] argc, @Cast("char***") @ByPtrPtr PointerPointer argv);

  // namespace port
  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_INIT_MAIN_H_


// Parsed from tensorflow/core/platform/types.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_TYPES_H_
// #define TENSORFLOW_CORE_PLATFORM_TYPES_H_

// #include 
// #include "tensorflow/core/platform/platform.h"

// Include appropriate platform-dependent implementations
// #if defined(PLATFORM_GOOGLE) || defined(GOOGLE_INTEGRAL_TYPES)
// #include "tensorflow/core/platform/google/integral_types.h"
// #elif defined(PLATFORM_WINDOWS)
// #include "tensorflow/core/platform/windows/integral_types.h"
// #elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) ||
//     defined(PLATFORM_GOOGLE_ANDROID)
// #include "tensorflow/core/platform/default/integral_types.h"
// #else
// #error Define the appropriate PLATFORM_ macro for this platform
// #endif

// Define tensorflow::string to refer to appropriate platform specific type.
// TODO(josh11b): Move this into the platform/*/integral_types.h files
// above, and rename them platform/*/types.h.
// #if defined(PLATFORM_GOOGLE)
// #else
// #endif

@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::uint8") byte kuint8max();
public static final byte kuint8max = kuint8max();
@Namespace("tensorflow") @MemberGetter public static native short kuint16max();
public static final short kuint16max = kuint16max();
@Namespace("tensorflow") @MemberGetter public static native int kuint32max();
public static final int kuint32max = kuint32max();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::uint64") long kuint64max();
public static final long kuint64max = kuint64max();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int8") byte kint8min();
public static final byte kint8min = kint8min();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int8") byte kint8max();
public static final byte kint8max = kint8max();
@Namespace("tensorflow") @MemberGetter public static native short kint16min();
public static final short kint16min = kint16min();
@Namespace("tensorflow") @MemberGetter public static native short kint16max();
public static final short kint16max = kint16max();
@Namespace("tensorflow") @MemberGetter public static native int kint32min();
public static final int kint32min = kint32min();
@Namespace("tensorflow") @MemberGetter public static native int kint32max();
public static final int kint32max = kint32max();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int64") long kint64min();
public static final long kint64min = kint64min();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int64") long kint64max();
public static final long kint64max = kint64max();

// A typedef for a uint64 used as a short fingerprint.

  // namespace tensorflow

// Alias namespace ::stream_executor as ::tensorflow::se.

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_TYPES_H_


// Parsed from tensorflow/core/platform/mutex.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_MUTEX_H_
// #define TENSORFLOW_CORE_PLATFORM_MUTEX_H_

// #include "tensorflow/core/platform/platform.h"
// #include "tensorflow/core/platform/types.h"
/** enum tensorflow::ConditionResult */
public static final int kCond_Timeout = 0, kCond_MaybeNotified = 1;
  // namespace tensorflow

// Include appropriate platform-dependent implementations of mutex etc.
// #if defined(PLATFORM_GOOGLE)
// #elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) ||
//     defined(PLATFORM_GOOGLE_ANDROID) || defined(PLATFORM_WINDOWS)
// #include "tensorflow/core/platform/default/mutex.h"
// #else
// #error Define the appropriate PLATFORM_ macro for this platform
// #endif

// The mutex library included above defines:
//   class mutex;
//   class mutex_lock;
//   class condition_variable;
// It also defines the following:

// Like "cv->wait(*mu)", except that it only waits for up to "ms" milliseconds.
//
// Returns kCond_Timeout if the timeout expired without this
// thread noticing a signal on the condition variable.  Otherwise may
// return either kCond_Timeout or kCond_MaybeNotified
@Namespace("tensorflow") public static native @Cast("tensorflow::ConditionResult") int WaitForMilliseconds(@Cast("tensorflow::mutex_lock*") Pointer mu, @Cast("tensorflow::condition_variable*") Pointer cv,
                                    @Cast("tensorflow::int64") long ms);
  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_MUTEX_H_


// Parsed from tensorflow/core/platform/macros.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_MACROS_H_
// #define TENSORFLOW_CORE_PLATFORM_MACROS_H_

// Compiler attributes
// #if (defined(__GNUC__) || defined(__APPLE__)) && !defined(SWIG)
// Compiler supports GCC-style attributes
// #define TF_ATTRIBUTE_NORETURN __attribute__((noreturn))
// #define TF_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
// #define TF_ATTRIBUTE_NOINLINE __attribute__((noinline))
// #define TF_ATTRIBUTE_UNUSED __attribute__((unused))
// #define TF_ATTRIBUTE_COLD __attribute__((cold))
// #define TF_ATTRIBUTE_WEAK __attribute__((weak))
// #define TF_PACKED __attribute__((packed))
// #define TF_MUST_USE_RESULT __attribute__((warn_unused_result))
// #define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
//   __attribute__((__format__(__printf__, string_index, first_to_check)))
// #define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
//   __attribute__((__format__(__scanf__, string_index, first_to_check)))
// #elif defined(_MSC_VER)
// Non-GCC equivalents
// #define TF_ATTRIBUTE_NORETURN __declspec(noreturn)
// #define TF_ATTRIBUTE_ALWAYS_INLINE __forceinline
// #define TF_ATTRIBUTE_NOINLINE
// #define TF_ATTRIBUTE_UNUSED
// #define TF_ATTRIBUTE_COLD
// #define TF_ATTRIBUTE_WEAK
// #define TF_MUST_USE_RESULT
// #define TF_PACKED
// #define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
// #define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
// #else
// Non-GCC equivalents
// #define TF_ATTRIBUTE_NORETURN
// #define TF_ATTRIBUTE_ALWAYS_INLINE
// #define TF_ATTRIBUTE_NOINLINE
// #define TF_ATTRIBUTE_UNUSED
// #define TF_ATTRIBUTE_COLD
// #define TF_ATTRIBUTE_WEAK
// #define TF_MUST_USE_RESULT
// #define TF_PACKED
// #define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
// #define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
// #endif

// Control visibility outside the .so
// #if defined(_WIN32)
// #ifdef TF_COMPILE_LIBRARY
// #define TF_EXPORT __declspec(dllexport)
// #else
// #define TF_EXPORT __declspec(dllimport)
// #endif  // TF_COMPILE_LIBRARY
// #else
// #define TF_EXPORT __attribute__((visibility("default")))
// #endif  // _WIN32

// #ifdef __has_builtin
// #define TF_HAS_BUILTIN(x) __has_builtin(x)
// #else
// #define TF_HAS_BUILTIN(x) 0
// #endif

// A compiler can be told that a certain branch is not likely to be taken
// (for instance, a CHECK failure), and can use that information in static
// analysis. Giving it this information helps it optimize for the
// common case in the absence of better information (i.e.
// -fprofile-arcs).
//
// We need to disable this for GPU builds, though, since nvcc8 and older
// don't recognize `__builtin_expect` as a builtin, and fail compilation.
// #if (!defined(__NVCC__)) &&
//     (TF_HAS_BUILTIN(__builtin_expect) || (defined(__GNUC__) && __GNUC__ >= 3))
// #define TF_PREDICT_FALSE(x) (__builtin_expect(x, 0))
// #define TF_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
// #else
// #define TF_PREDICT_FALSE(x) (x)
// #define TF_PREDICT_TRUE(x) (x)
// #endif

// A macro to disallow the copy constructor and operator= functions
// This is usually placed in the private: declarations for a class.
// #define TF_DISALLOW_COPY_AND_ASSIGN(TypeName)
//   TypeName(const TypeName&) = delete;
//   void operator=(const TypeName&) = delete

// The TF_ARRAYSIZE(arr) macro returns the # of elements in an array arr.
//
// The expression TF_ARRAYSIZE(a) is a compile-time constant of type
// size_t.
// #define TF_ARRAYSIZE(a)
//   ((sizeof(a) / sizeof(*(a))) /
//    static_cast(!(sizeof(a) % sizeof(*(a)))))

// #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L ||
//     (defined(_MSC_VER) && _MSC_VER >= 1900)
// Define this to 1 if the code is compiled in C++11 mode; leave it
// undefined otherwise.  Do NOT define it to 0 -- that causes
// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'.
public static final int LANG_CXX11 = 1;
// #endif

// #if defined(__clang__) && defined(LANG_CXX11) && defined(__has_warning)
// #if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
// #define TF_FALLTHROUGH_INTENDED [[clang::fallthrough]]  // NOLINT
// #endif
// #endif

// #ifndef TF_FALLTHROUGH_INTENDED
// #define TF_FALLTHROUGH_INTENDED
//   do {
//   } while (0)
// #endif

// #endif  // TENSORFLOW_CORE_PLATFORM_MACROS_H_


// Parsed from tensorflow/core/util/port.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_UTIL_PORT_H_
// #define TENSORFLOW_CORE_UTIL_PORT_H_

// Returns true if GOOGLE_CUDA is defined.
@Namespace("tensorflow") public static native @Cast("bool") boolean IsGoogleCudaEnabled();

// Returns true if GOOGLE_CUDA is defined, and the given CUDA version supports
// half-precision matrix multiplications and convolution operations.
@Namespace("tensorflow") public static native @Cast("bool") boolean CudaSupportsHalfMatMulAndConv();

// Returns true if INTEL_MKL is defined.
@Namespace("tensorflow") public static native @Cast("bool") boolean IsMklEnabled();

  // end namespace tensorflow

// #endif  // TENSORFLOW_CORE_UTIL_PORT_H_


// Parsed from tensorflow/core/lib/core/error_codes.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/lib/core/error_codes.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto

// #include 

// #include 

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include 
// #include   // IWYU pragma: export
// #include   // IWYU pragma: export
// #include 
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto
// Internal implementation detail -- do not use these members.
@Namespace("protobuf_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto") public static class TableStruct extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TableStruct() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TableStruct(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TableStruct(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TableStruct position(long position) {
        return (TableStruct)super.position(position);
    }

  @MemberGetter public static native @Cast("const google::protobuf::uint32") int offsets(int i);
  @MemberGetter public static native @Cast("const google::protobuf::uint32*") IntPointer offsets();
}
@Namespace("protobuf_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto") public static native void AddDescriptors();
  // namespace protobuf_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto
  // namespace error
  // namespace tensorflow

/** enum tensorflow::error::Code */
public static final int
  OK = 0,
  CANCELLED = 1,
  UNKNOWN = 2,
  INVALID_ARGUMENT = 3,
  DEADLINE_EXCEEDED = 4,
  NOT_FOUND = 5,
  ALREADY_EXISTS = 6,
  PERMISSION_DENIED = 7,
  UNAUTHENTICATED = 16,
  RESOURCE_EXHAUSTED = 8,
  FAILED_PRECONDITION = 9,
  ABORTED = 10,
  OUT_OF_RANGE = 11,
  UNIMPLEMENTED = 12,
  INTERNAL = 13,
  UNAVAILABLE = 14,
  DATA_LOSS = 15,
  DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = 20,
  Code_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
  Code_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_IsValid(int value);
@Namespace("tensorflow::error") @MemberGetter public static native @Cast("const tensorflow::error::Code") int Code_MIN();
@Namespace("tensorflow::error") @MemberGetter public static native @Cast("const tensorflow::error::Code") int Code_MAX();
@Namespace("tensorflow::error") @MemberGetter public static native int Code_ARRAYSIZE();

@Namespace("tensorflow::error") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer Code_descriptor();
@Namespace("tensorflow::error") public static native @StdString BytePointer Code_Name(@Cast("tensorflow::error::Code") int value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
    @StdString BytePointer name, @Cast("tensorflow::error::Code*") IntPointer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
    @StdString String name, @Cast("tensorflow::error::Code*") IntBuffer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
    @StdString BytePointer name, @Cast("tensorflow::error::Code*") int... value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
    @StdString String name, @Cast("tensorflow::error::Code*") IntPointer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
    @StdString BytePointer name, @Cast("tensorflow::error::Code*") IntBuffer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
    @StdString String name, @Cast("tensorflow::error::Code*") int... value);
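
// Illustrative sketch (not part of the generated bindings): round-tripping
// an error code through its name with the helpers above. The method name is
// hypothetical; the constants come from the enum above.
public static void exampleErrorCodeNames() {
    String name = Code_Name(NOT_FOUND).getString();     // "NOT_FOUND"
    int[] parsed = new int[1];                          // Code_Parse output slot
    if (Code_Parse(name, parsed) && Code_IsValid(parsed[0])) {
        System.out.println(name + " = " + parsed[0]);   // NOT_FOUND = 5
    }
}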
// ===================================================================


// ===================================================================


// ===================================================================

// #ifdef __GNUC__
//   #pragma GCC diagnostic push
//   #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__
// #ifdef __GNUC__
//   #pragma GCC diagnostic pop
// #endif  // __GNUC__

// @@protoc_insertion_point(namespace_scope)

  // namespace error
  // namespace tensorflow


  // namespace protobuf
  // namespace google

// @@protoc_insertion_point(global_scope)

// #endif  // PROTOBUF_INCLUDED_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto


// Parsed from tensorflow/core/platform/logging.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_LOGGING_H_
// #define TENSORFLOW_CORE_PLATFORM_LOGGING_H_

// #include "tensorflow/core/platform/platform.h"  // To pick up PLATFORM_define

// #if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID) ||
//     defined(GOOGLE_LOGGING)
// #include "tensorflow/core/platform/google/build_config/logging.h"
// #else
// #include "tensorflow/core/platform/default/logging.h"
// #endif
// Emit "message" as a log message to the log for the specified
// "severity" as if it came from a LOG call at "fname:line"
@Namespace("tensorflow::internal") public static native void LogString(@Cast("const char*") BytePointer fname, int line, int severity,
               @StdString BytePointer message);
@Namespace("tensorflow::internal") public static native void LogString(String fname, int line, int severity,
               @StdString String message);
  // namespace internal

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_LOGGING_H_


// Parsed from tensorflow/core/lib/core/status.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_LIB_CORE_STATUS_H_
// #define TENSORFLOW_CORE_LIB_CORE_STATUS_H_

// #include 
// #include 
// #include 
// #include 
// #include "tensorflow/core/lib/core/error_codes.pb.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"

// #if defined(__clang__)
// Only clang supports warn_unused_result as a type annotation.
// #endif

/** \ingroup core
 *  Denotes success or failure of a call in Tensorflow. */
@Namespace("tensorflow") @NoOffset public static class Status extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Status(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public Status(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public Status position(long position) {
        return (Status)super.position(position);
    }

  /** Create a success status. */
  public Status() { super((Pointer)null); allocate(); }
  private native void allocate();

  /** \brief Create a status with the specified error code and msg as a
   *  human-readable string containing more detailed information. */
  public Status(@Cast("tensorflow::error::Code") int code, @StringPiece BytePointer msg) { super((Pointer)null); allocate(code, msg); }
  private native void allocate(@Cast("tensorflow::error::Code") int code, @StringPiece BytePointer msg);
  public Status(@Cast("tensorflow::error::Code") int code, @StringPiece String msg) { super((Pointer)null); allocate(code, msg); }
  private native void allocate(@Cast("tensorflow::error::Code") int code, @StringPiece String msg);

  /** Copy the specified status. */
  public Status(@Const @ByRef Status s) { super((Pointer)null); allocate(s); }
  private native void allocate(@Const @ByRef Status s);
  public native @Name("operator =") void put(@Const @ByRef Status s);

  public static native @ByVal Status OK();

  /** Returns true iff the status indicates success. */
  public native @Cast("bool") boolean ok();

  public native @Cast("tensorflow::error::Code") int code();

  public native @StdString BytePointer error_message();

  public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Status x);
  
  ///
  public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Status x);

  /** \brief If {@code ok()}, stores {@code new_status} into {@code *this}.  If {@code !ok()},
   *  preserves the current status, but may augment with additional
   *  information about {@code new_status}.
   * 
   *  Convenient way of keeping track of the first error encountered.
   *  Instead of:
   *    {@code if (overall_status.ok()) overall_status = new_status}
   *  Use:
   *    {@code overall_status.Update(new_status);} */
  public native void Update(@Const @ByRef Status new_status);

  /** \brief Return a string representation of this status suitable for
   *  printing. Returns the string {@code "OK"} for success. */
  public native @StdString BytePointer ToString();

  // Ignores any errors. This method does nothing except potentially suppress
  // complaints from any tools that are checking that errors are not dropped on
  // the floor.
  public native void IgnoreError();
}
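
// Illustrative sketch (not part of the generated bindings) of the Status API
// above: construct statuses, inspect them, and keep the first error with
// Update(). The method name is hypothetical.
public static void exampleStatusUsage() {
    Status overall = Status.OK();
    overall.Update(new Status(INVALID_ARGUMENT, "first error wins"));
    overall.Update(new Status(NOT_FOUND, "ignored: overall already failed"));
    if (!overall.ok()) {
        System.out.println("code: " + overall.code());   // 3 (INVALID_ARGUMENT)
        System.out.println("message: " + overall.error_message().getString());
        System.out.println(overall.ToString().getString());
    }
}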









/** \ingroup core */
@Namespace("tensorflow") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef Status x);

@Namespace("tensorflow") public static native @StdString @Cast({"char*", "std::string*"}) BytePointer TfCheckOpHelperOutOfLine(
    @Const @ByRef Status v, @Cast("const char*") BytePointer msg);
@Namespace("tensorflow") public static native @StdString @Cast({"char*", "std::string*"}) BytePointer TfCheckOpHelperOutOfLine(
    @Const @ByRef Status v, String msg);

@Namespace("tensorflow") public static native @StdString @Cast({"char*", "std::string*"}) BytePointer TfCheckOpHelper(@ByVal Status v,
                                           @Cast("const char*") BytePointer msg);
@Namespace("tensorflow") public static native @StdString @Cast({"char*", "std::string*"}) BytePointer TfCheckOpHelper(@ByVal Status v,
                                           String msg);

// #define TF_DO_CHECK_OK(val, level)
//   while (auto _result = ::tensorflow::TfCheckOpHelper(val, #val))
//   LOG(level) << *(_result)

public static native void TF_CHECK_OK(@ByVal Status val);
public static native void TF_QCHECK_OK(@ByVal Status val);
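
// Illustrative sketch (not part of the generated bindings): TF_CHECK_OK
// mirrors the C++ macro above and is fatal (LOG(FATAL)) when the status is
// not OK, so it suits invariants rather than recoverable errors.
public static void exampleCheckInvariant(Status mustBeOk) {
    TF_CHECK_OK(mustBeOk);   // no-op when mustBeOk.ok(), aborts otherwise
}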

// DEBUG only version of TF_CHECK_OK.  Compiler still parses 'val' even in opt
// mode.
// #ifndef NDEBUG
// #define TF_DCHECK_OK(val) TF_CHECK_OK(val)
// #else
// #define TF_DCHECK_OK(val)
//   while (false && (::tensorflow::Status::OK() == (val))) LOG(FATAL)
// #endif

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_LIB_CORE_STATUS_H_


// Parsed from tensorflow/core/lib/io/zlib_compression_options.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_LIB_IO_ZLIB_COMPRESSION_OPTIONS_H_
// #define TENSORFLOW_LIB_IO_ZLIB_COMPRESSION_OPTIONS_H_

// #include "tensorflow/core/platform/types.h"

@Namespace("tensorflow::io") @NoOffset public static class ZlibCompressionOptions extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ZlibCompressionOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ZlibCompressionOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ZlibCompressionOptions position(long position) {
        return (ZlibCompressionOptions)super.position(position);
    }

  public ZlibCompressionOptions() { super((Pointer)null); allocate(); }
  private native void allocate();

  public static native @ByVal ZlibCompressionOptions DEFAULT();
  public static native @ByVal ZlibCompressionOptions RAW();
  public static native @ByVal ZlibCompressionOptions GZIP();

  // Defaults to Z_NO_FLUSH
  public native @Cast("tensorflow::int8") byte flush_mode(); public native ZlibCompressionOptions flush_mode(byte flush_mode);

  // Size of the buffer used for caching the data read from the source file.
  public native @Cast("tensorflow::int64") long input_buffer_size(); public native ZlibCompressionOptions input_buffer_size(long input_buffer_size);

  // Size of the sink buffer where the compressed/decompressed data produced by
  // zlib is cached.
  public native @Cast("tensorflow::int64") long output_buffer_size(); public native ZlibCompressionOptions output_buffer_size(long output_buffer_size);

  // The window_bits parameter is the base two logarithm of the window size
  // (the size of the history buffer). Larger values of buffer size result in
  // better compression at the expense of memory usage.
  //
  // Accepted values:
  //
  // 8..15:
  // Normal deflate with zlib header and checksum.
  //
  // -8..-15:
  // Negative values can be used for raw deflate/inflate. In this case,
  // -window_bits determines the window size. deflate() will then generate raw
  // deflate data with no zlib header or trailer, and will not compute an
  // adler32 check value. inflate() will then process raw deflate data, not
  // looking for a zlib or gzip header, not generating a check value, and not
  // looking for any check values for comparison at the end of the stream.
  //
  // 16 + [8..15]:
  // window_bits can also be greater than 15 for optional gzip encoding. Add 16
  // to window_bits to write a simple gzip header and trailer around the
  // compressed data instead of a zlib wrapper. The gzip header will have no
  // file name, no extra data, no comment, no modification time (set to zero),
  // no header crc, and the operating system will be set to 255 (unknown). If a
  // gzip stream is being written, strm->adler is a crc32 instead of an adler32.
  //
  // 0:
  // window_bits can also be zero to request that inflate use the window size
  // in the zlib header of the compressed stream.
  //
  // While inflating, window_bits must be greater than or equal to the
  // window_bits value used while compressing. If a compressed stream
  // with a larger window size is given as input, inflate() will return with the
  // error code Z_DATA_ERROR instead of trying to allocate a larger window.
  //
  // Defaults to MAX_WBITS
  public native @Cast("tensorflow::int8") byte window_bits(); public native ZlibCompressionOptions window_bits(byte window_bits);

  // From the zlib manual (http://www.zlib.net/manual.html):
  // The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
  // 1 gives best speed, 9 gives best compression, 0 gives no compression at all
  // (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION
  // requests a default compromise between speed and compression (currently
  // equivalent to level 6).
  public native @Cast("tensorflow::int8") byte compression_level(); public native ZlibCompressionOptions compression_level(byte compression_level);

  // Only Z_DEFLATED is supported at this time.
  public native @Cast("tensorflow::int8") byte compression_method(); public native ZlibCompressionOptions compression_method(byte compression_method);

  // From the zlib manual (http://www.zlib.net/manual.html):
  // The mem_level parameter specifies how much memory should be allocated for
  // the internal compression state. mem_level=1 uses minimum memory but is slow
  // and reduces compression ratio; mem_level=9 uses maximum memory for optimal
  // speed. The default value is 8.
  public native @Cast("tensorflow::int8") byte mem_level(); public native ZlibCompressionOptions mem_level(byte mem_level);

  // From the zlib manual (http://www.zlib.net/manual.html):
  // The strategy parameter is used to tune the compression algorithm. Use the
  // value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by
  // a filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only
  // (no string match), or Z_RLE to limit match distances to one
  // (run-length encoding). Filtered data consists mostly of small values with
  // a somewhat random distribution. In this case, the compression algorithm is
  // tuned to compress them better. The effect of Z_FILTERED is to force more
  // Huffman coding and less string matching; it is somewhat intermediate
  // between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be
  // almost as fast as Z_HUFFMAN_ONLY, but give better compression for
  // PNG image data. The strategy parameter only affects the compression ratio
  // but not the correctness of the compressed output even if it is not set
  // appropriately. Z_FIXED prevents the use of dynamic Huffman codes, allowing
  // for a simpler decoder for special applications.
  public native @Cast("tensorflow::int8") byte compression_strategy(); public native ZlibCompressionOptions compression_strategy(byte compression_strategy);
}
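
// Illustrative sketch (not part of the generated bindings): start from the
// GZIP() preset above and tune a few knobs. The values follow the zlib
// documentation quoted above; the method name is hypothetical.
public static ZlibCompressionOptions exampleGzipOptions() {
    ZlibCompressionOptions opts = ZlibCompressionOptions.GZIP();
    opts.compression_level((byte) 9);       // best compression, slowest
    opts.input_buffer_size(256 * 1024);     // bytes staged before deflate
    opts.output_buffer_size(256 * 1024);    // bytes of deflated data cached
    return opts;
}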







  // namespace io
  // namespace tensorflow

// #endif  // TENSORFLOW_LIB_IO_ZLIB_COMPRESSION_OPTIONS_H_


// Parsed from tensorflow/core/lib/io/zlib_outputbuffer.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_LIB_IO_COMPRESSED_OUTPUTBUFFER_H_
// #define TENSORFLOW_CORE_LIB_IO_COMPRESSED_OUTPUTBUFFER_H_

// #include 

// #include 

// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/io/zlib_compression_options.h"
// #include "tensorflow/core/platform/env.h"
// #include "tensorflow/core/platform/file_system.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"

// Provides support for writing compressed output to file using zlib
// (http://www.zlib.net/).
// A given instance of a ZlibOutputBuffer is NOT safe for concurrent use
// by multiple threads.
@Namespace("tensorflow::io") @NoOffset public static class ZlibOutputBuffer extends WritableFile {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ZlibOutputBuffer(Pointer p) { super(p); }

  // Create a ZlibOutputBuffer for `file` with two buffers that cache the
  // 1. input data to be deflated
  // 2. the deflated output
  // with sizes `input_buffer_bytes` and `output_buffer_bytes` respectively.
  // Does not take ownership of `file`.
  // output_buffer_bytes should be greater than 1.
  public ZlibOutputBuffer(
        WritableFile file,
        int input_buffer_bytes,
        int output_buffer_bytes,
        @Const @ByRef ZlibCompressionOptions zlib_options) { super((Pointer)null); allocate(file, input_buffer_bytes, output_buffer_bytes, zlib_options); }
  private native void allocate(
        WritableFile file,
        int input_buffer_bytes,
        int output_buffer_bytes,
        @Const @ByRef ZlibCompressionOptions zlib_options);

  // Initializes some state necessary for the output buffer. This call is
  // required before any other operation on the buffer.
  public native @ByVal Status Init();

  // Adds `data` to the compression pipeline.
  //
  // The input data is buffered in `z_stream_input_` and is compressed in bulk
  // when the buffer gets full. The compressed output is not immediately
  // written to file but rather buffered in `z_stream_output_` and gets written
  // to file when the buffer is full.
  //
  // To immediately write contents to file call `Flush()`.
  public native @ByVal Status Append(@StringPiece BytePointer data);
  public native @ByVal Status Append(@StringPiece String data);

  // Deflates any cached input and writes all output to file.
  public native @ByVal Status Flush();

  // Compresses any cached input and writes all output to file. This must be
  // called before the destructor to avoid any data loss.
  //
  // Contrary to `Flush()` this informs zlib that it should not expect any
  // further input by using Z_FINISH flush mode. Also cleans up z_stream.
  //
  // After calling this, any further calls to `Append()`, `Flush()` or `Close()`
  // will fail.
  public native @ByVal Status Close();

  // Deflates any cached input, writes all output to file and syncs it.
  public native @ByVal Status Sync();
}
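
// Illustrative sketch (not part of the generated bindings) of the
// ZlibOutputBuffer lifecycle: Init(), Append(), then Close(). It assumes
// the Env bindings declared later in this file provide Env.Default() and
// NewWritableFile(), and that JavaCPP writes the allocated file back into
// the wrapper passed in. The method name is hypothetical.
public static Status exampleWriteCompressed(String path, String payload) {
    WritableFile file = new WritableFile((Pointer)null);
    Status s = Env.Default().NewWritableFile(path, file);
    if (!s.ok()) return s;
    ZlibOutputBuffer out = new ZlibOutputBuffer(
            file, 64 * 1024, 64 * 1024, ZlibCompressionOptions.GZIP());
    s = out.Init();                      // required before any other call
    if (s.ok()) s = out.Append(payload);
    if (s.ok()) s = out.Close();         // Z_FINISH; mandatory before dropping
    return s;
}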

  // namespace io
  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_LIB_IO_COMPRESSED_OUTPUTBUFFER_H_


// Parsed from tensorflow/core/lib/io/inputstream_interface.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_LIB_IO_INPUTSTREAM_INTERFACE_H_
// #define TENSORFLOW_CORE_LIB_IO_INPUTSTREAM_INTERFACE_H_

// #include 
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/platform/types.h"

// An interface that defines input streaming operations.
@Namespace("tensorflow::io") public static class InputStreamInterface extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public InputStreamInterface(Pointer p) { super(p); }


  // Reads the next bytes_to_read bytes from the file. Typical return codes:
  //  * OK - in case of success.
  //  * OUT_OF_RANGE - not enough bytes remaining before end of file.
  public native @ByVal Status ReadNBytes(@Cast("tensorflow::int64") long bytes_to_read, @StdString @Cast({"char*", "std::string*"}) BytePointer result);

  // Skips bytes_to_skip before next ReadNBytes. bytes_to_skip should be >= 0.
  // Typical return codes:
  //  * OK - in case of success.
  //  * OUT_OF_RANGE - not enough bytes remaining before end of file.
  public native @ByVal Status SkipNBytes(@Cast("tensorflow::int64") long bytes_to_skip);

  // Return the offset of the current byte relative to the beginning of the
  // file.
  // If we Skip / Read beyond the end of the file, this should return the length
  // of the file.
  // If there are any errors, this must return -1.
  public native @Cast("tensorflow::int64") long Tell();

  // Resets the stream to the beginning.
  public native @ByVal Status Reset();
}

  // namespace io
  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_LIB_IO_INPUTSTREAM_INTERFACE_H_


// Parsed from tensorflow/core/lib/io/record_reader.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_LIB_IO_RECORD_READER_H_
// #define TENSORFLOW_CORE_LIB_IO_RECORD_READER_H_

// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/io/inputstream_interface.h"
// #if !defined(IS_SLIM_BUILD)
// #include "tensorflow/core/lib/io/zlib_compression_options.h"
// #include "tensorflow/core/lib/io/zlib_inputstream.h"
// #endif  // IS_SLIM_BUILD
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"

@Namespace("tensorflow::io") public static class RecordReaderOptions extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public RecordReaderOptions() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public RecordReaderOptions(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RecordReaderOptions(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public RecordReaderOptions position(long position) {
        return (RecordReaderOptions)super.position(position);
    }

  /** enum tensorflow::io::RecordReaderOptions::CompressionType */
  public static final int NONE = 0, ZLIB_COMPRESSION = 1;
  public native @Cast("tensorflow::io::RecordReaderOptions::CompressionType") int compression_type(); public native RecordReaderOptions compression_type(int compression_type);

  // If buffer_size is non-zero, then all reads must be sequential, and no
  // skipping around is permitted. (Note: this is the same behavior as reading
  // compressed files.) Consider using SequentialRecordReader.
  public native @Cast("tensorflow::int64") long buffer_size(); public native RecordReaderOptions buffer_size(long buffer_size);

  public static native @ByVal RecordReaderOptions CreateRecordReaderOptions(
        @StdString BytePointer compression_type);
  public static native @ByVal RecordReaderOptions CreateRecordReaderOptions(
        @StdString String compression_type);

// #if !defined(IS_SLIM_BUILD)
  // Options specific to zlib compression.
  public native @ByRef ZlibCompressionOptions zlib_options(); public native RecordReaderOptions zlib_options(ZlibCompressionOptions zlib_options);
// #endif  // IS_SLIM_BUILD
}

// Low-level interface to read TFRecord files.
//
// If using compression or buffering, consider using SequentialRecordReader.
//
// Note: this class is not thread safe; external synchronization required.
@Namespace("tensorflow::io") @NoOffset public static class RecordReader extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RecordReader(Pointer p) { super(p); }

  // Format of a single record:
  //  uint64    length
  //  uint32    masked crc of length
  //  byte      data[length]
  //  uint32    masked crc of data
  @MemberGetter public static native @Cast("const size_t") long kHeaderSize();
  public static final long kHeaderSize = kHeaderSize();
  @MemberGetter public static native @Cast("const size_t") long kFooterSize();
  public static final long kFooterSize = kFooterSize();

  // Statistics (sizes are in units of bytes)
  public static class Stats extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Stats() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Stats(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Stats(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Stats position(long position) {
          return (Stats)super.position(position);
      }
  
    public native @Cast("tensorflow::int64") long file_size(); public native Stats file_size(long file_size);
    public native @Cast("tensorflow::int64") long data_size(); public native Stats data_size(long data_size);
    public native @Cast("tensorflow::int64") long entries(); public native Stats entries(long entries);  // Number of values
  }

  // Metadata for the TFRecord file.
  public static class Metadata extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Metadata() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Metadata(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Metadata(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Metadata position(long position) {
          return (Metadata)super.position(position);
      }
  
    public native @ByRef Stats stats(); public native Metadata stats(Stats stats);
  }

  // Create a reader that will return log records from "*file".
  // "*file" must remain live while this Reader is in use.
  public RecordReader(
        RandomAccessFile file,
        @Const @ByRef(nullValue = "tensorflow::io::RecordReaderOptions()") RecordReaderOptions options) { super((Pointer)null); allocate(file, options); }
  private native void allocate(
        RandomAccessFile file,
        @Const @ByRef(nullValue = "tensorflow::io::RecordReaderOptions()") RecordReaderOptions options);
  public RecordReader(
        RandomAccessFile file) { super((Pointer)null); allocate(file); }
  private native void allocate(
        RandomAccessFile file);

  // Read the record at "*offset" into *record and update *offset to
  // point to the offset of the next record.  Returns OK on success,
  // OUT_OF_RANGE for end of file, or something else for an error.
  public native @ByVal Status ReadRecord(@Cast("tensorflow::uint64*") LongPointer offset, @StdString @Cast({"char*", "std::string*"}) BytePointer record);
  public native @ByVal Status ReadRecord(@Cast("tensorflow::uint64*") LongBuffer offset, @StdString @Cast({"char*", "std::string*"}) BytePointer record);
  public native @ByVal Status ReadRecord(@Cast("tensorflow::uint64*") long[] offset, @StdString @Cast({"char*", "std::string*"}) BytePointer record);

  // Return the metadata of the Record file.
  //
  // The current implementation scans the file to completion,
  // skipping over the data regions, to extract the metadata once
  // on the first call to GetMetadata().  An improved implementation
  // would change RecordWriter to write the metadata into TFRecord
  // so that GetMetadata() could be a const method.
  //
  // 'metadata' must not be nullptr.
  public native @ByVal Status GetMetadata(Metadata md);
}

// High-level interface to read TFRecord files.
//
// Note: this class is not thread safe; external synchronization required.
@Namespace("tensorflow::io") @NoOffset public static class SequentialRecordReader extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SequentialRecordReader(Pointer p) { super(p); }

  // Create a reader that will return log records from "*file".
  // "*file" must remain live while this Reader is in use.
  public SequentialRecordReader(
        RandomAccessFile file,
        @Const @ByRef(nullValue = "tensorflow::io::RecordReaderOptions()") RecordReaderOptions options) { super((Pointer)null); allocate(file, options); }
  private native void allocate(
        RandomAccessFile file,
        @Const @ByRef(nullValue = "tensorflow::io::RecordReaderOptions()") RecordReaderOptions options);
  public SequentialRecordReader(
        RandomAccessFile file) { super((Pointer)null); allocate(file); }
  private native void allocate(
        RandomAccessFile file);

  // Reads the next record in the file into *record. Returns OK on success,
  // OUT_OF_RANGE for end of file, or something else for an error.
  public native @ByVal Status ReadRecord(@StdString @Cast({"char*", "std::string*"}) BytePointer record);

  // Returns the current offset in the file.
  public native @Cast("tensorflow::uint64") long TellOffset();

  // Seek to this offset within the file and set this offset as the current
  // offset. Trying to seek backward will return an error.
  public native @ByVal Status SeekOffset(@Cast("tensorflow::uint64") long offset);
}
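
// Illustrative sketch (not part of the generated bindings): streaming every
// record out of a TFRecord file. It assumes the Env bindings declared later
// in this file provide Env.Default() and NewRandomAccessFile(), and that
// JavaCPP's std::string adapter updates `record` in place after each call.
// The method name is hypothetical.
public static Status exampleReadAllRecords(String path) {
    RandomAccessFile file = new RandomAccessFile((Pointer)null);
    Status s = Env.Default().NewRandomAccessFile(path, file);
    if (!s.ok()) return s;
    SequentialRecordReader reader = new SequentialRecordReader(file);
    BytePointer record = new BytePointer((Pointer)null);
    while ((s = reader.ReadRecord(record)).ok()) {
        System.out.println(record.limit() + " bytes, now at offset "
                + reader.TellOffset());
    }
    // OUT_OF_RANGE marks a clean end of file.
    return s.code() == OUT_OF_RANGE ? Status.OK() : s;
}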

  // namespace io
  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_LIB_IO_RECORD_READER_H_


// Parsed from tensorflow/core/lib/io/record_writer.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_LIB_IO_RECORD_WRITER_H_
// #define TENSORFLOW_CORE_LIB_IO_RECORD_WRITER_H_

// #include "tensorflow/core/lib/core/coding.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/hash/crc32c.h"
// #if !defined(IS_SLIM_BUILD)
// #include "tensorflow/core/lib/io/zlib_compression_options.h"
// #include "tensorflow/core/lib/io/zlib_outputbuffer.h"
// #endif  // IS_SLIM_BUILD
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"

@Namespace("tensorflow::io") public static class RecordWriterOptions extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public RecordWriterOptions() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public RecordWriterOptions(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RecordWriterOptions(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public RecordWriterOptions position(long position) {
        return (RecordWriterOptions)super.position(position);
    }

  /** enum tensorflow::io::RecordWriterOptions::CompressionType */
  public static final int NONE = 0, ZLIB_COMPRESSION = 1;
  public native @Cast("tensorflow::io::RecordWriterOptions::CompressionType") int compression_type(); public native RecordWriterOptions compression_type(int compression_type);

  public static native @ByVal RecordWriterOptions CreateRecordWriterOptions(
        @StdString BytePointer compression_type);
  public static native @ByVal RecordWriterOptions CreateRecordWriterOptions(
        @StdString String compression_type);

// Options specific to zlib compression.
// #if !defined(IS_SLIM_BUILD)
  public native @ByRef ZlibCompressionOptions zlib_options(); public native RecordWriterOptions zlib_options(ZlibCompressionOptions zlib_options);
// #endif  // IS_SLIM_BUILD
}

@Namespace("tensorflow::io") @NoOffset public static class RecordWriter extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RecordWriter(Pointer p) { super(p); }

  // Format of a single record:
  //  uint64    length
  //  uint32    masked crc of length
  //  byte      data[length]
  //  uint32    masked crc of data
  @MemberGetter public static native @Cast("const size_t") long kHeaderSize();
  public static final long kHeaderSize = kHeaderSize();
  @MemberGetter public static native @Cast("const size_t") long kFooterSize();
  public static final long kFooterSize = kFooterSize();

  // Create a writer that will append data to "*dest".
  // "*dest" must be initially empty.
  // "*dest" must remain live while this Writer is in use.
  public RecordWriter(WritableFile dest,
                 @Const @ByRef(nullValue = "tensorflow::io::RecordWriterOptions()") RecordWriterOptions options) { super((Pointer)null); allocate(dest, options); }
  private native void allocate(WritableFile dest,
                 @Const @ByRef(nullValue = "tensorflow::io::RecordWriterOptions()") RecordWriterOptions options);
  public RecordWriter(WritableFile dest) { super((Pointer)null); allocate(dest); }
  private native void allocate(WritableFile dest);

  // Calls Close() and logs if an error occurs.
  //
  // TODO(jhseu): Require that callers explicitly call Close() and remove the
  // implicit Close() call in the destructor.

  public native @ByVal Status WriteRecord(@StringPiece BytePointer slice);
  public native @ByVal Status WriteRecord(@StringPiece String slice);

  // Flushes any buffered data held by underlying containers of the
  // RecordWriter to the WritableFile. Does *not* flush the
  // WritableFile.
  public native @ByVal Status Flush();

  // Writes all output to the file. Does *not* close the WritableFile.
  //
  // After calling Close(), any further calls to `WriteRecord()` or `Flush()`
  // are invalid.
  public native @ByVal Status Close();

  // Utility method to populate TFRecord headers.  Populates record-header in
  // "header[0,kHeaderSize-1]".  The record-header is based on data[0, n-1].
  public static native void PopulateHeader(@Cast("char*") BytePointer header, @Cast("const char*") BytePointer data, @Cast("size_t") long n);
  public static native void PopulateHeader(@Cast("char*") ByteBuffer header, String data, @Cast("size_t") long n);
  public static native void PopulateHeader(@Cast("char*") byte[] header, @Cast("const char*") BytePointer data, @Cast("size_t") long n);
  public static native void PopulateHeader(@Cast("char*") BytePointer header, String data, @Cast("size_t") long n);
  public static native void PopulateHeader(@Cast("char*") ByteBuffer header, @Cast("const char*") BytePointer data, @Cast("size_t") long n);
  public static native void PopulateHeader(@Cast("char*") byte[] header, String data, @Cast("size_t") long n);

  // Utility method to populate TFRecord footers.  Populates record-footer in
  // "footer[0,kFooterSize-1]".  The record-footer is based on data[0, n-1].
  public static native void PopulateFooter(@Cast("char*") BytePointer footer, @Cast("const char*") BytePointer data, @Cast("size_t") long n);
  public static native void PopulateFooter(@Cast("char*") ByteBuffer footer, String data, @Cast("size_t") long n);
  public static native void PopulateFooter(@Cast("char*") byte[] footer, @Cast("const char*") BytePointer data, @Cast("size_t") long n);
  public static native void PopulateFooter(@Cast("char*") BytePointer footer, String data, @Cast("size_t") long n);
  public static native void PopulateFooter(@Cast("char*") ByteBuffer footer, @Cast("const char*") BytePointer data, @Cast("size_t") long n);
  public static native void PopulateFooter(@Cast("char*") byte[] footer, String data, @Cast("size_t") long n);
}
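
// Illustrative sketch (not part of the generated bindings): writing TFRecord
// entries. As documented above, Close() finalizes the record stream but does
// not close the underlying WritableFile. Env.Default()/NewWritableFile() are
// assumed from the Env bindings later in this file; the method name is
// hypothetical.
public static Status exampleWriteRecords(String path, String[] payloads) {
    WritableFile file = new WritableFile((Pointer)null);
    Status s = Env.Default().NewWritableFile(path, file);
    if (!s.ok()) return s;
    RecordWriter writer = new RecordWriter(file);   // default: no compression
    for (String payload : payloads) {
        s = writer.WriteRecord(payload);
        if (!s.ok()) return s;
    }
    s = writer.Close();              // stop the record stream
    if (s.ok()) s = file.Close();    // then close the file itself
    return s;
}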





  // namespace io
  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_LIB_IO_RECORD_WRITER_H_


// Parsed from tensorflow/core/platform/protobuf.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_PROTOBUF_H_
// #define TENSORFLOW_CORE_PLATFORM_PROTOBUF_H_

// #include "tensorflow/core/platform/platform.h"
// #include "tensorflow/core/platform/types.h"

// Import whatever namespace protobuf comes from into the
// ::tensorflow::protobuf namespace.
//
// TensorFlow code should use the ::tensorflow::protobuf namespace to
// refer to all protobuf APIs.

// #if defined(PLATFORM_GOOGLE) && !defined(USE_DEFAULT_PROTOBUF)
// #include "tensorflow/core/platform/google/protobuf.h"
// #else
// #include "tensorflow/core/platform/default/protobuf.h"
// #endif
// Parses a protocol buffer contained in a string in the binary wire format.
// Returns true on success. Note: Unlike protobuf's builtin ParseFromString,
// this function has no size restrictions on the total size of the encoded
// protocol buffer.
@Namespace("tensorflow") public static native @Cast("bool") boolean ParseProtoUnlimited(@Cast("tensorflow::protobuf::MessageLite*") MessageLite proto,
                         @StdString BytePointer serialized);
@Namespace("tensorflow") public static native @Cast("bool") boolean ParseProtoUnlimited(@Cast("tensorflow::protobuf::MessageLite*") MessageLite proto,
                         @StdString String serialized);
@Namespace("tensorflow") public static native @Cast("bool") boolean ParseProtoUnlimited(@Cast("tensorflow::protobuf::MessageLite*") MessageLite proto, @Const Pointer serialized,
                         @Cast("size_t") long size);

// Returns the string value for the value of a string or bytes protobuf field.
@Namespace("tensorflow") public static native @StdString BytePointer ProtobufStringToString(@StdString BytePointer s);
@Namespace("tensorflow") public static native @StdString String ProtobufStringToString(@StdString String s);

// Set "dest" to "src". Swapping is allowed, as "src" does not need to be
// preserved.
@Namespace("tensorflow") public static native void SetProtobufStringSwapAllowed(@StdString @Cast({"char*", "std::string*"}) BytePointer src, @StdString @Cast({"char*", "std::string*"}) BytePointer dest);

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_PROTOBUF_H_


// Parsed from tensorflow/core/platform/file_system.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_
// #define TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_

// #include 
// #include 
// #include 
// #include 
// #include 
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/cord.h"
// #include "tensorflow/core/platform/file_statistics.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/platform.h"
// #include "tensorflow/core/platform/types.h"

// #ifdef PLATFORM_WINDOWS
// #undef DeleteFile
// #endif

/** A generic interface for accessing a file system.  Implementations
 *  of custom filesystem adapters must implement this interface,
 *  RandomAccessFile, WritableFile, and ReadOnlyMemoryRegion classes. */
@Namespace("tensorflow") public static class FileSystem extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FileSystem(Pointer p) { super(p); }

  /** \brief Creates a brand new random access read-only file with the
   *  specified name.
   * 
   *  On success, stores a pointer to the new file in
   *  *result and returns OK.  On failure stores NULL in *result and
   *  returns non-OK.  If the file does not exist, returns a non-OK
   *  status.
   * 
   *  The returned file may be concurrently accessed by multiple threads.
   * 
   *  The ownership of the returned RandomAccessFile is passed to the caller
   *  and the object should be deleted when it is no longer used. */
  
  ///
  ///
  ///
  public native @ByVal Status NewRandomAccessFile(
        @StdString BytePointer fname, @UniquePtr RandomAccessFile result);
  public native @ByVal Status NewRandomAccessFile(
        @StdString String fname, @UniquePtr RandomAccessFile result);

  /** \brief Creates an object that writes to a new file with the specified
   *  name.
   * 
   *  Deletes any existing file with the same name and creates a
   *  new file.  On success, stores a pointer to the new file in
   *  *result and returns OK.  On failure stores NULL in *result and
   *  returns non-OK.
   * 
   *  The returned file will only be accessed by one thread at a time.
   * 
   *  The ownership of the returned WritableFile is passed to the caller
   *  and the object should be deleted when it is no longer used. */
  
  ///
  ///
  ///
  public native @ByVal Status NewWritableFile(@StdString BytePointer fname,
                                   @UniquePtr WritableFile result);
  public native @ByVal Status NewWritableFile(@StdString String fname,
                                   @UniquePtr WritableFile result);

  /** \brief Creates an object that either appends to an existing file, or
   *  writes to a new file (if the file does not exist to begin with).
   * 
   *  On success, stores a pointer to the new file in *result and
   *  returns OK.  On failure stores NULL in *result and returns
   *  non-OK.
   * 
   *  The returned file will only be accessed by one thread at a time.
   * 
   *  The ownership of the returned WritableFile is passed to the caller
   *  and the object should be deleted when it is no longer used. */
  
  ///
  ///
  ///
  public native @ByVal Status NewAppendableFile(@StdString BytePointer fname,
                                     @UniquePtr WritableFile result);
  public native @ByVal Status NewAppendableFile(@StdString String fname,
                                     @UniquePtr WritableFile result);

  /** \brief Creates a readonly region of memory with the file context.
   * 
   *  On success, it returns a pointer to read-only memory region
   *  from the content of file fname. The ownership of the region is passed to
   *  the caller. On failure stores nullptr in *result and returns non-OK.
   * 
   *  The returned memory region can be accessed from many threads in parallel.
   * 
   *  The ownership of the returned ReadOnlyMemoryRegion is passed to the caller
   *  and the object should be deleted when it is no longer used. */
  public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
        @StdString BytePointer fname, @UniquePtr ReadOnlyMemoryRegion result);
  public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
        @StdString String fname, @UniquePtr ReadOnlyMemoryRegion result);

  /** Returns OK if the named path exists and NOT_FOUND otherwise. */
  public native @ByVal Status FileExists(@StdString BytePointer fname);
  public native @ByVal Status FileExists(@StdString String fname);

  /** Returns true if all the listed files exist, false otherwise.
   *  If status is not null, populates the vector with a detailed status
   *  for each file. */
  
  ///
  public native @Cast("bool") boolean FilesExist(@Const @ByRef StringVector files,
                            @StdVector Status status);

  /** \brief Returns the immediate children in the given directory.
   * 
   *  The returned paths are relative to 'dir'. */
  
  ///
  ///
  ///
  public native @ByVal Status GetChildren(@StdString BytePointer dir,
                               StringVector result);
  public native @ByVal Status GetChildren(@StdString String dir,
                               StringVector result);

  /** \brief Given a pattern, stores in *results the set of paths that matches
   *  that pattern. *results is cleared.
   * 
   *  pattern must match all of a name, not just a substring.
   * 
   *  pattern: { term }
   *  term:
   *    '*': matches any sequence of non-'/' characters
   *    '?': matches a single non-'/' character
   *    '[' [ '^' ] { match-list } ']':
   *         matches any single character (not) on the list
   *    c: matches character c (c != '*', '?', '\\', '[')
   *    '\\' c: matches character c
   *  character-range:
   *    c: matches character c (c != '\\', '-', ']')
   *    '\\' c: matches character c
   *    lo '-' hi: matches character c for lo <= c <= hi
   * 
   *  Typical return codes:
   *   * OK - no errors
   *   * UNIMPLEMENTED - Some underlying functions (like GetChildren) are not
   *                     implemented */
  public native @ByVal Status GetMatchingPaths(@StdString BytePointer pattern,
                                    StringVector results);
  public native @ByVal Status GetMatchingPaths(@StdString String pattern,
                                    StringVector results);

  /** \brief Obtains statistics for the given path. */
  public native @ByVal Status Stat(@StdString BytePointer fname, FileStatistics stat);
  public native @ByVal Status Stat(@StdString String fname, FileStatistics stat);

  /** \brief Deletes the named file. */
  public native @ByVal Status DeleteFile(@StdString BytePointer fname);
  public native @ByVal Status DeleteFile(@StdString String fname);

  /** \brief Creates the specified directory.
   *  Typical return codes:
   *   * OK - successfully created the directory.
   *   * ALREADY_EXISTS - directory with name dirname already exists.
   *   * PERMISSION_DENIED - dirname is not writable. */
  public native @ByVal Status CreateDir(@StdString BytePointer dirname);
  public native @ByVal Status CreateDir(@StdString String dirname);

  /** \brief Creates the specified directory and all the necessary
   *  subdirectories.
   *  Typical return codes:
   *   * OK - successfully created the directory and sub directories, even if
   *          they were already created.
   *   * PERMISSION_DENIED - dirname or some subdirectory is not writable. */
  public native @ByVal Status RecursivelyCreateDir(@StdString BytePointer dirname);
  public native @ByVal Status RecursivelyCreateDir(@StdString String dirname);

  /** \brief Deletes the specified directory. */
  public native @ByVal Status DeleteDir(@StdString BytePointer dirname);
  public native @ByVal Status DeleteDir(@StdString String dirname);

  /** \brief Deletes the specified directory and all subdirectories and files
   *  underneath it. undeleted_files and undeleted_dirs store the number of
   *  files and directories that weren't deleted (unspecified if the return
   *  status is not OK).
   *  REQUIRES: undeleted_files, undeleted_dirs to be not null.
   *  Typical return codes:
   *   * OK - dirname exists and we were able to delete everything underneath.
   *   * NOT_FOUND - dirname doesn't exist
   *   * PERMISSION_DENIED - dirname or some descendant is not writable
   *   * UNIMPLEMENTED - Some underlying functions (like Delete) are not
   *                     implemented */
  public native @ByVal Status DeleteRecursively(@StdString BytePointer dirname,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_files,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString String dirname,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_files,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString BytePointer dirname,
                                     @Cast("tensorflow::int64*") long[] undeleted_files,
                                     @Cast("tensorflow::int64*") long... undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString String dirname,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_files,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString BytePointer dirname,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_files,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString String dirname,
                                     @Cast("tensorflow::int64*") long[] undeleted_files,
                                     @Cast("tensorflow::int64*") long... undeleted_dirs);

  /** \brief Stores the size of {@code fname} in {@code *file_size}. */
  public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongPointer file_size);
  public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
  public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") long... file_size);
  public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongPointer file_size);
  public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
  public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") long... file_size);

  /** \brief Overwrites the target if it exists. */
  public native @ByVal Status RenameFile(@StdString BytePointer src, @StdString BytePointer target);
  public native @ByVal Status RenameFile(@StdString String src, @StdString String target);

  /** \brief Copy the src to target. */
  
  ///
  public native @ByVal Status CopyFile(@StdString BytePointer src, @StdString BytePointer target);
  public native @ByVal Status CopyFile(@StdString String src, @StdString String target);

  /** \brief Translate a URI to a filename for the FileSystem implementation.
   * 
   *  The implementation in this class cleans up the path, removing
   *  duplicate /'s, resolving .. and . (more details in
   *  tensorflow::lib::io::CleanPath). */
  
  ///
  public native @StdString BytePointer TranslateName(@StdString BytePointer name);
  public native @StdString String TranslateName(@StdString String name);

  /** \brief Returns whether the given path is a directory or not.
   * 
   *  Typical return codes (not guaranteed exhaustive):
   *   * OK - The path exists and is a directory.
   *   * FAILED_PRECONDITION - The path exists and is not a directory.
   *   * NOT_FOUND - The path entry does not exist.
   *   * PERMISSION_DENIED - Insufficient permissions.
   *   * UNIMPLEMENTED - The file factory doesn't support directories. */
  public native @ByVal Status IsDirectory(@StdString BytePointer fname);
  public native @ByVal Status IsDirectory(@StdString String fname);

  /** \brief Flushes any cached filesystem objects from memory. */
  public native void FlushCaches();
}
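
// Illustrative sketch (not part of the generated bindings): expanding a glob
// pattern and stat-ing the matches through the FileSystem interface above.
// A concrete FileSystem would typically come from the Env bindings later in
// this file; StringVector is the std::vector<std::string> wrapper declared
// earlier. The method name is hypothetical.
public static Status exampleListMatching(FileSystem fs, String pattern) {
    StringVector matches = new StringVector();
    Status s = fs.GetMatchingPaths(pattern, matches);   // e.g. "/data/*.tfrecord"
    for (long i = 0; s.ok() && i < matches.size(); i++) {
        String name = matches.get(i).getString();
        long[] size = new long[1];
        if (fs.GetFileSize(name, size).ok()) {
            System.out.println(name + ": " + size[0] + " bytes");
        }
    }
    return s;
}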

/** A file abstraction for randomly reading the contents of a file. */
@Namespace("tensorflow") public static class RandomAccessFile extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RandomAccessFile(Pointer p) { super(p); }


  /** \brief Reads up to {@code n} bytes from the file starting at {@code offset}.
   * 
   *  {@code scratch[0..n-1]} may be written by this routine.  Sets {@code *result}
   *  to the data that was read (including if fewer than {@code n} bytes were
   *  successfully read).  May set {@code *result} to point at data in
   *  {@code scratch[0..n-1]}, so {@code scratch[0..n-1]} must be live when
   *  {@code *result} is used.
   * 
   *  On OK returned status: {@code n} bytes have been stored in {@code *result}.
   *  On non-OK returned status: {@code [0..n]} bytes have been stored in {@code *result}.
   * 
   *  Returns {@code OUT_OF_RANGE} if fewer than n bytes were stored in {@code *result}
   *  because of EOF.
   * 
   *  Safe for concurrent use by multiple threads. */
  public native @ByVal Status Read(@Cast("tensorflow::uint64") long offset, @Cast("size_t") long n, @StringPiece @Cast({"char*", "StringPiece*"}) BytePointer result,
                        @Cast("char*") BytePointer scratch);
  public native @ByVal Status Read(@Cast("tensorflow::uint64") long offset, @Cast("size_t") long n, @StringPiece @Cast({"char*", "StringPiece*"}) BytePointer result,
                        @Cast("char*") ByteBuffer scratch);
  public native @ByVal Status Read(@Cast("tensorflow::uint64") long offset, @Cast("size_t") long n, @StringPiece @Cast({"char*", "StringPiece*"}) BytePointer result,
                        @Cast("char*") byte[] scratch);
}
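
// A minimal usage sketch (not part of the generated bindings): reading the
// first bytes of a file opened via Env.NewRandomAccessFile (declared further
// below). The file name, the null-initialized @UniquePtr output parameter, and
// pre-sizing the StringPiece result to the scratch capacity are assumptions in
// the usual JavaCPP style, not a documented calling convention.
public static void randomAccessReadSketch() {
    RandomAccessFile file = new RandomAccessFile((Pointer)null);
    Status open = Env.Default().NewRandomAccessFile("/tmp/example.bin", file);  // hypothetical path
    if (open.ok()) {
        byte[] scratch = new byte[64];                         // backing buffer for the read
        BytePointer result = new BytePointer(scratch.length);  // receives the StringPiece view
        // Read returns OUT_OF_RANGE when fewer than n bytes are available before EOF.
        Status read = file.Read(0, scratch.length, result, scratch);
        System.out.println("read ok: " + read.ok());
    }
}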

/** \brief A file abstraction for sequential writing.
 * 
 *  The implementation must provide buffering since callers may append
 *  small fragments at a time to the file. */
@Namespace("tensorflow") public static class WritableFile extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public WritableFile(Pointer p) { super(p); }


  /** \brief Append 'data' to the file. */
  
  ///
  ///
  public native @ByVal Status Append(@StringPiece BytePointer data);
  public native @ByVal Status Append(@StringPiece String data);

  // TODO(ebrevdo): Remove this ifdef when absl is updated.
// #if defined(PLATFORM_GOOGLE)
// #endif

  /** \brief Close the file.
   * 
   *  Flush() and de-allocate resources associated with this file.
   * 
   *  Typical return codes (not guaranteed to be exhaustive):
   *   * OK
   *   * Other codes, as returned from Flush() */
  
  ///
  ///
  public native @ByVal Status Close();

  /** \brief Flushes the file and optionally syncs contents to filesystem.
   * 
   *  This should flush any local buffers whose contents have not been
   *  delivered to the filesystem.
   * 
   *  If the process terminates after a successful flush, the contents
   *  may still be persisted, since the underlying filesystem may
   *  eventually flush the contents.  If the OS or machine crashes
   *  after a successful flush, the contents may or may not be
   *  persisted, depending on the implementation. */
  
  ///
  public native @ByVal Status Flush();

  /** \brief Syncs contents of file to filesystem.
   * 
   *  This waits for confirmation from the filesystem that the contents
   *  of the file have been persisted to the filesystem; if the OS
   *  or machine crashes after a successful Sync, the contents should
   *  be properly saved. */
  public native @ByVal Status Sync();
}
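
// A minimal usage sketch (not part of the generated bindings): appending to a
// file created with Env.NewWritableFile (declared further below) and making the
// write durable. The file name is an illustrative assumption; the
// null-initialized @UniquePtr output parameter follows common JavaCPP usage.
public static void writableFileSketch() {
    WritableFile file = new WritableFile((Pointer)null);
    Status s = Env.Default().NewWritableFile("/tmp/example.log", file);  // hypothetical path
    if (s.ok()) {
        file.Append("hello world\n");  // buffered; small fragments are fine
        file.Flush();                  // hand local buffers to the filesystem
        file.Sync();                   // wait until the contents are persisted
        file.Close();                  // flush and release resources
    }
}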

/** \brief A readonly memmapped file abstraction.
 * 
 *  The implementation must guarantee that all memory is accessible when the
 *  object exists, independently from the Env that created it. */
@Namespace("tensorflow") public static class ReadOnlyMemoryRegion extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ReadOnlyMemoryRegion(Pointer p) { super(p); }


  /** \brief Returns a pointer to the memory region. */
  public native @Const Pointer data();

  /** \brief Returns the length of the memory region in bytes. */
  public native @Cast("tensorflow::uint64") long length();
}
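
// A minimal usage sketch (not part of the generated bindings): memory-mapping a
// file via Env.NewReadOnlyMemoryRegionFromFile (declared further below) and
// inspecting the region. The file name and the output-parameter pattern are
// assumptions in the same spirit as the sketches above.
public static void memoryRegionSketch() {
    ReadOnlyMemoryRegion region = new ReadOnlyMemoryRegion((Pointer)null);
    Status s = Env.Default().NewReadOnlyMemoryRegionFromFile("/tmp/example.bin", region);  // hypothetical path
    if (s.ok()) {
        // data() stays valid only while the region object itself is alive.
        System.out.println("mapped " + region.length() + " bytes");
    }
}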

/** \brief A registry for file system implementations.
 * 
 *  Filenames are specified as a URI, which is of the form
 *  [scheme://]<filename>.
 *  File system implementations are registered using the REGISTER_FILE_SYSTEM
 *  macro, providing the 'scheme' as the key. */
@Namespace("tensorflow") public static class FileSystemRegistry extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FileSystemRegistry(Pointer p) { super(p); }

  public native @ByVal Status Register(@StdString BytePointer scheme, @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Pointer factory);
  public native @ByVal Status Register(@StdString String scheme, @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Pointer factory);
  public native FileSystem Lookup(@StdString BytePointer scheme);
  public native FileSystem Lookup(@StdString String scheme);
  public native @ByVal Status GetRegisteredFileSystemSchemes(
        StringVector schemes);
}
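
// A minimal usage sketch (not part of the generated bindings): listing the URI
// schemes registered with the default Env. Assumes StringVector exposes the
// usual generated no-argument constructor and size()/get() accessors, like the
// other vector wrappers in this file.
public static void registeredSchemesSketch() {
    StringVector schemes = new StringVector();
    Status s = Env.Default().GetRegisteredFileSystemSchemes(schemes);
    if (s.ok()) {
        for (long i = 0; i < schemes.size(); i++) {
            System.out.println(schemes.get(i).getString());  // e.g. "file" or "ram"
        }
    }
}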

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_


// Parsed from tensorflow/core/platform/file_statistics.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_FILE_STATISTICS_H_
// #define TENSORFLOW_CORE_PLATFORM_FILE_STATISTICS_H_

// #include "tensorflow/core/platform/types.h"

@Namespace("tensorflow") @NoOffset public static class FileStatistics extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FileStatistics(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public FileStatistics(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public FileStatistics position(long position) {
        return (FileStatistics)super.position(position);
    }

  // The length of the file or -1 if finding file length is not supported.
  public native @Cast("tensorflow::int64") long length(); public native FileStatistics length(long length);
  // The last modified time in nanoseconds.
  public native @Cast("tensorflow::int64") long mtime_nsec(); public native FileStatistics mtime_nsec(long mtime_nsec);
  // True if the file is a directory, otherwise false.
  public native @Cast("bool") boolean is_directory(); public native FileStatistics is_directory(boolean is_directory);

  public FileStatistics() { super((Pointer)null); allocate(); }
  private native void allocate();
  public FileStatistics(@Cast("tensorflow::int64") long length, @Cast("tensorflow::int64") long mtime_nsec, @Cast("bool") boolean is_directory) { super((Pointer)null); allocate(length, mtime_nsec, is_directory); }
  private native void allocate(@Cast("tensorflow::int64") long length, @Cast("tensorflow::int64") long mtime_nsec, @Cast("bool") boolean is_directory);
}
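
// A minimal usage sketch (not part of the generated bindings): filling a
// FileStatistics struct through Env.Stat (declared further below). The path is
// an illustrative assumption.
public static void fileStatisticsSketch() {
    FileStatistics stat = new FileStatistics();
    Status s = Env.Default().Stat("/tmp/example.txt", stat);  // hypothetical path
    if (s.ok()) {
        System.out.println("length: " + stat.length()          // -1 if unsupported
                + ", mtime(ns): " + stat.mtime_nsec()
                + ", dir: " + stat.is_directory());
    }
}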

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_PLATFORM_FILE_STATISTICS_H_


// Parsed from tensorflow/core/platform/env.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_PLATFORM_ENV_H_
// #define TENSORFLOW_CORE_PLATFORM_ENV_H_

// #include 
// #include 
// #include 
// #include 
// #include 
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/env_time.h"
// #include "tensorflow/core/platform/file_system.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/protobuf.h"
// #include "tensorflow/core/platform/types.h"

/** \brief An interface used by the tensorflow implementation to
 *  access operating system functionality like the filesystem etc.
 * 
 *  Callers may wish to provide a custom Env object to get fine-grained
 *  control.
 * 
 *  All Env implementations are safe for concurrent access from
 *  multiple threads without any external synchronization. */
@Namespace("tensorflow") @NoOffset public static class Env extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Env(Pointer p) { super(p); }


  /** \brief Returns a default environment suitable for the current operating
   *  system.
   * 
   *  Sophisticated users may wish to provide their own Env
   *  implementation instead of relying on this default environment.
   * 
   *  The result of Default() belongs to this library and must never be deleted. */
  public static native Env Default();

  /** \brief Returns the FileSystem object to handle operations on the file
   *  specified by 'fname'. The FileSystem object is used as the implementation
   *  for the file system related (non-virtual) functions that follow.
   *  Returned FileSystem object is still owned by the Env object and will
   *  (might) be destroyed when the environment is destroyed. */
  public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname, @Cast("tensorflow::FileSystem**") PointerPointer result);
  public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname, @ByPtrPtr FileSystem result);
  public native @ByVal Status GetFileSystemForFile(@StdString String fname, @ByPtrPtr FileSystem result);

  /** \brief Returns the file system schemes registered for this Env. */
  public native @ByVal Status GetRegisteredFileSystemSchemes(StringVector schemes);

  /** \brief Register a file system for a scheme. */
  public native @ByVal Status RegisterFileSystem(@StdString BytePointer scheme,
                                      @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Pointer factory);
  public native @ByVal Status RegisterFileSystem(@StdString String scheme,
                                      @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Pointer factory);

  /** \brief Flush filesystem caches for all registered filesystems. */
  
  ///
  ///
  public native @ByVal Status FlushFileSystemCaches();

  /** \brief Creates a brand new random access read-only file with the
   *  specified name.
   * 
   *  On success, stores a pointer to the new file in
   *  *result and returns OK. On failure stores NULL in *result and
   *  returns non-OK. If the file does not exist, returns a non-OK
   *  status.
   * 
   *  The returned file may be concurrently accessed by multiple threads.
   * 
   *  The ownership of the returned RandomAccessFile is passed to the caller
   *  and the object should be deleted when it is not used. The file object
   *  shouldn't live longer than the Env object. */
  
  ///
  ///
  ///
  public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname,
                                       @UniquePtr RandomAccessFile result);
  public native @ByVal Status NewRandomAccessFile(@StdString String fname,
                                       @UniquePtr RandomAccessFile result);

  /** \brief Creates an object that writes to a new file with the specified
   *  name.
   * 
   *  Deletes any existing file with the same name and creates a
   *  new file. On success, stores a pointer to the new file in
   *  *result and returns OK. On failure stores NULL in *result and
   *  returns non-OK.
   * 
   *  The returned file will only be accessed by one thread at a time.
   * 
   *  The ownership of the returned WritableFile is passed to the caller
   *  and the object should be deleted when it is not used. The file object
   *  shouldn't live longer than the Env object. */
  
  ///
  ///
  ///
  public native @ByVal Status NewWritableFile(@StdString BytePointer fname,
                                   @UniquePtr WritableFile result);
  public native @ByVal Status NewWritableFile(@StdString String fname,
                                   @UniquePtr WritableFile result);

  /** \brief Creates an object that either appends to an existing file, or
   *  writes to a new file (if the file does not exist to begin with).
   * 
   *  On success, stores a pointer to the new file in *result and
   *  returns OK. On failure stores NULL in *result and returns
   *  non-OK.
   * 
   *  The returned file will only be accessed by one thread at a time.
   * 
   *  The ownership of the returned WritableFile is passed to the caller
   *  and the object should be deleted when it is not used. The file object
   *  shouldn't live longer than the Env object. */
  
  ///
  ///
  ///
  public native @ByVal Status NewAppendableFile(@StdString BytePointer fname,
                                     @UniquePtr WritableFile result);
  public native @ByVal Status NewAppendableFile(@StdString String fname,
                                     @UniquePtr WritableFile result);

  /** \brief Creates a readonly region of memory with the file context.
   * 
   *  On success, it returns a pointer to read-only memory region
   *  from the content of file fname. The ownership of the region is passed to
   *  the caller. On failure stores nullptr in *result and returns non-OK.
   * 
   *  The returned memory region can be accessed from many threads in parallel.
   * 
   *  The ownership of the returned ReadOnlyMemoryRegion is passed to the caller
   *  and the object should be deleted when it is not used. The memory region
   *  object shouldn't live longer than the Env object. */
  public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
        @StdString BytePointer fname, @UniquePtr ReadOnlyMemoryRegion result);
  public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
        @StdString String fname, @UniquePtr ReadOnlyMemoryRegion result);

  /** Returns OK if the named path exists and NOT_FOUND otherwise. */
  public native @ByVal Status FileExists(@StdString BytePointer fname);
  public native @ByVal Status FileExists(@StdString String fname);

  /** Returns true if all the listed files exist, false otherwise.
   *  If status is not null, populates the vector with a detailed status
   *  for each file. */
  
  ///
  public native @Cast("bool") boolean FilesExist(@Const @ByRef StringVector files,
                          @StdVector Status status);

  /** \brief Stores in *result the names of the children of the specified
   *  directory. The names are relative to "dir".
   * 
   *  Original contents of *results are dropped. */
  public native @ByVal Status GetChildren(@StdString BytePointer dir, StringVector result);
  public native @ByVal Status GetChildren(@StdString String dir, StringVector result);

  /** \brief Returns true if the path matches the given pattern. The wildcards
   *  allowed in pattern are described in FileSystem::GetMatchingPaths. */
  
  ///
  public native @Cast("bool") boolean MatchPath(@StdString BytePointer path, @StdString BytePointer pattern);
  public native @Cast("bool") boolean MatchPath(@StdString String path, @StdString String pattern);

  /** \brief Given a pattern, stores in *results the set of paths that matches
   *  that pattern. *results is cleared.
   * 
   *  More details about {@code pattern} in FileSystem::GetMatchingPaths. */
  public native @ByVal Status GetMatchingPaths(@StdString BytePointer pattern, StringVector results);
  public native @ByVal Status GetMatchingPaths(@StdString String pattern, StringVector results);

  /** Deletes the named file. */
  public native @ByVal Status DeleteFile(@StdString BytePointer fname);
  public native @ByVal Status DeleteFile(@StdString String fname);

  /** \brief Deletes the specified directory and all subdirectories and files
   *  underneath it. undeleted_files and undeleted_dirs store the number of
   *  files and directories that weren't deleted (unspecified if the return
   *  status is not OK).
   *  REQUIRES: undeleted_files, undeleted_dirs to be not null.
   *  Typical return codes:
   *   * OK - dirname exists and we were able to delete everything underneath.
   *   * NOT_FOUND - dirname doesn't exist.
   *   * PERMISSION_DENIED - dirname or some descendant is not writable.
   *   * UNIMPLEMENTED - Some underlying functions (like Delete) are not
   *     implemented. */
  public native @ByVal Status DeleteRecursively(@StdString BytePointer dirname,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_files,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString String dirname,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_files,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString BytePointer dirname,
                                     @Cast("tensorflow::int64*") long[] undeleted_files,
                                     @Cast("tensorflow::int64*") long... undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString String dirname,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_files,
                                     @Cast("tensorflow::int64*") LongPointer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString BytePointer dirname,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_files,
                                     @Cast("tensorflow::int64*") LongBuffer undeleted_dirs);
  public native @ByVal Status DeleteRecursively(@StdString String dirname,
                                     @Cast("tensorflow::int64*") long[] undeleted_files,
                                     @Cast("tensorflow::int64*") long... undeleted_dirs);

  /** \brief Creates the specified directory and all the necessary
   *  subdirectories. Typical return codes:
   *   * OK - successfully created the directory and sub directories, even if
   *     they were already created.
   *   * PERMISSION_DENIED - dirname or some subdirectory is not writable. */
  public native @ByVal Status RecursivelyCreateDir(@StdString BytePointer dirname);
  public native @ByVal Status RecursivelyCreateDir(@StdString String dirname);

  /** \brief Creates the specified directory. Typical return codes:
   *   * OK - successfully created the directory.
   *   * ALREADY_EXISTS - directory already exists.
   *   * PERMISSION_DENIED - dirname is not writable. */
  public native @ByVal Status CreateDir(@StdString BytePointer dirname);
  public native @ByVal Status CreateDir(@StdString String dirname);

  /** Deletes the specified directory. */
  public native @ByVal Status DeleteDir(@StdString BytePointer dirname);
  public native @ByVal Status DeleteDir(@StdString String dirname);

  /** Obtains statistics for the given path. */
  public native @ByVal Status Stat(@StdString BytePointer fname, FileStatistics stat);
  public native @ByVal Status Stat(@StdString String fname, FileStatistics stat);

  /** \brief Returns whether the given path is a directory or not.
   *  Typical return codes (not guaranteed exhaustive):
   *   * OK - The path exists and is a directory.
   *   * FAILED_PRECONDITION - The path exists and is not a directory.
   *   * NOT_FOUND - The path entry does not exist.
   *   * PERMISSION_DENIED - Insufficient permissions.
   *   * UNIMPLEMENTED - The file factory doesn't support directories. */
  public native @ByVal Status IsDirectory(@StdString BytePointer fname);
  public native @ByVal Status IsDirectory(@StdString String fname);

  /** Stores the size of {@code fname} in {@code *file_size}. */
  public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongPointer file_size);
  public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
  public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") long... file_size);
  public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongPointer file_size);
  public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
  public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") long... file_size);

  /** \brief Renames file src to target. If target already exists, it will be
   *  replaced. */
  public native @ByVal Status RenameFile(@StdString BytePointer src, @StdString BytePointer target);
  public native @ByVal Status RenameFile(@StdString String src, @StdString String target);

  /** \brief Copy the src to target. */
  public native @ByVal Status CopyFile(@StdString BytePointer src, @StdString BytePointer target);
  public native @ByVal Status CopyFile(@StdString String src, @StdString String target);

  /** \brief Returns the absolute path of the current executable. It resolves
   *  symlinks if there is any. */
  public native @StdString BytePointer GetExecutablePath();

  /** Creates a local unique temporary file name. Returns true if success. */
  public native @Cast("bool") boolean LocalTempFilename(@StdString @Cast({"char*", "std::string*"}) BytePointer filename);

  /** Creates a local unique file name that starts with |prefix| and ends with
   *  |suffix|. Returns true if success. */
  public native @Cast("bool") boolean CreateUniqueFileName(@StdString @Cast({"char*", "std::string*"}) BytePointer prefix, @StdString BytePointer suffix);
  public native @Cast("bool") boolean CreateUniqueFileName(@StdString @Cast({"char*", "std::string*"}) BytePointer prefix, @StdString String suffix);

  // TODO(jeff,sanjay): Add back thread/thread-pool support if needed.
  // TODO(jeff,sanjay): if needed, tighten spec so relative to epoch, or
  // provide a routine to get the absolute time.

  /** \brief Returns the number of nano-seconds since the Unix epoch. */
  public native @Cast("tensorflow::uint64") long NowNanos();

  /** \brief Returns the number of micro-seconds since the Unix epoch. */
  public native @Cast("tensorflow::uint64") long NowMicros();

  /** \brief Returns the number of seconds since the Unix epoch. */
  public native @Cast("tensorflow::uint64") long NowSeconds();

  /** Sleeps/delays the thread for the prescribed number of micro-seconds. */
  
  ///
  public native void SleepForMicroseconds(@Cast("tensorflow::int64") long micros);

  /** \brief Returns a new thread that is running fn() and is identified
   *  (for debugging/performance-analysis) by "name".
   * 
   *  Caller takes ownership of the result and must delete it eventually
   *  (the deletion will block until fn() stops running). */
  public native Thread StartThread(@Const @ByRef ThreadOptions thread_options,
                        @StdString BytePointer name, @ByVal Fn fn);
  public native Thread StartThread(@Const @ByRef ThreadOptions thread_options,
                        @StdString String name, @ByVal Fn fn);

  // \brief Schedules the given closure on a thread-pool.
  //
  // NOTE(mrry): This closure may block.
  public native void SchedClosure(@ByVal Fn closure);

  // \brief Schedules the given closure on a thread-pool after the given number
  // of microseconds.
  //
  // NOTE(mrry): This closure must not block.
  public native void SchedClosureAfter(@Cast("tensorflow::int64") long micros, @ByVal Fn closure);

  // \brief Load a dynamic library.
  //
  // Pass "library_filename" to a platform-specific mechanism for dynamically
  // loading a library. The rules for determining the exact location of the
  // library are platform-specific and are not documented here.
  //
  // On success, returns a handle to the library in "*handle" and returns
  // OK from the function.
  // Otherwise returns nullptr in "*handle" and an error status from the
  // function.
  public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") PointerPointer handle);
  public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
  public native @ByVal Status LoadLibrary(String library_filename, @Cast("void**") @ByPtrPtr Pointer handle);

  // \brief Get a pointer to a symbol from a dynamic library.
  //
  // "handle" should be a pointer returned from a previous call to LoadLibrary.
  // On success, store a pointer to the located symbol in "*symbol" and return
  // OK from the function. Otherwise, returns nullptr in "*symbol" and an error
  // status from the function.
  public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
                                        @Cast("void**") PointerPointer symbol);
  public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
                                        @Cast("void**") @ByPtrPtr Pointer symbol);
  public native @ByVal Status GetSymbolFromLibrary(Pointer handle, String symbol_name,
                                        @Cast("void**") @ByPtrPtr Pointer symbol);

  // \brief Build the name of a dynamic library.
  //
  // "name" should be the name of the library.
  // "version" should be the version of the library or NULL.
  // Returns the name that LoadLibrary() can use.
  public native @StdString BytePointer FormatLibraryFileName(@StdString BytePointer name, @StdString BytePointer version);
  public native @StdString String FormatLibraryFileName(@StdString String name, @StdString String version);

  // Returns a possible list of local temporary directories.
  public native void GetLocalTempDirectories(StringVector list);
}
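
// A minimal usage sketch (not part of the generated bindings): a few Env calls
// that need no output-parameter tricks. The directory name is an illustrative
// assumption.
public static void envUsageSketch() {
    Env env = Env.Default();  // owned by the library; never delete it
    System.out.println("now (us): " + env.NowMicros());
    Status created = env.RecursivelyCreateDir("/tmp/a/b/c");  // hypothetical path
    System.out.println("created: " + created.ok()
            + ", exists: " + env.FileExists("/tmp/a/b/c").ok());
}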
/** \brief An implementation of Env that forwards all calls to another Env.
 * 
 *  May be useful to clients who wish to override just part of the
 *  functionality of another Env. */
@Namespace("tensorflow") @NoOffset public static class EnvWrapper extends Env {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EnvWrapper(Pointer p) { super(p); }

  /** Initializes an EnvWrapper that delegates all calls to *t */
  public EnvWrapper(Env t) { super((Pointer)null); allocate(t); }
  private native void allocate(Env t);

  /** Returns the target to which this Env forwards all calls */
  public native Env target();

  public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname, @Cast("tensorflow::FileSystem**") PointerPointer result);
  public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname, @ByPtrPtr FileSystem result);
  public native @ByVal Status GetFileSystemForFile(@StdString String fname, @ByPtrPtr FileSystem result);

  public native @ByVal Status GetRegisteredFileSystemSchemes(StringVector schemes);

  public native @ByVal Status RegisterFileSystem(@StdString BytePointer scheme,
                                      @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Pointer factory);
  public native @ByVal Status RegisterFileSystem(@StdString String scheme,
                                      @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Pointer factory);

  public native @Cast("bool") boolean MatchPath(@StdString BytePointer path, @StdString BytePointer pattern);
  public native @Cast("bool") boolean MatchPath(@StdString String path, @StdString String pattern);

  public native @Cast("tensorflow::uint64") long NowMicros();
  public native void SleepForMicroseconds(@Cast("tensorflow::int64") long micros);

  public native Thread StartThread(@Const @ByRef ThreadOptions thread_options,
                        @StdString BytePointer name, @ByVal Fn fn);
  public native Thread StartThread(@Const @ByRef ThreadOptions thread_options,
                        @StdString String name, @ByVal Fn fn);
  public native void SchedClosure(@ByVal Fn closure);
  public native void SchedClosureAfter(@Cast("tensorflow::int64") long micros, @ByVal Fn closure);

  public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") PointerPointer handle);
  public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
  public native @ByVal Status LoadLibrary(String library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
  public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
                                        @Cast("void**") PointerPointer symbol);
  public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
                                        @Cast("void**") @ByPtrPtr Pointer symbol);
  public native @ByVal Status GetSymbolFromLibrary(Pointer handle, String symbol_name,
                                        @Cast("void**") @ByPtrPtr Pointer symbol);
  public native @StdString BytePointer FormatLibraryFileName(@StdString BytePointer name, @StdString BytePointer version);
  public native @StdString String FormatLibraryFileName(@StdString String name, @StdString String version);
}

/** Represents a thread used to run a Tensorflow function. */
@Namespace("tensorflow") public static class Thread extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Thread(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public Thread(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public Thread position(long position) {
        return (Thread)super.position(position);
    }

  public Thread() { super((Pointer)null); allocate(); }
  private native void allocate();

  /** Blocks until the thread of control stops running. */
}

/** \brief Options to configure a Thread.
 * 
 *  Note that the options are all hints, and the
 *  underlying implementation may choose to ignore them. */
@Namespace("tensorflow") public static class ThreadOptions extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public ThreadOptions() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ThreadOptions(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ThreadOptions(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public ThreadOptions position(long position) {
        return (ThreadOptions)super.position(position);
    }

  /** Thread stack size to use (in bytes). */
  public native @Cast("size_t") long stack_size(); public native ThreadOptions stack_size(long stack_size);  // 0: use system default value
  /** Guard area size to use near thread stacks to use (in bytes) */
  public native @Cast("size_t") long guard_size(); public native ThreadOptions guard_size(long guard_size);  // 0: use system default value
}

/** A utility routine: copy contents of {@code src} in file system {@code src_fs}
 *  to {@code target} in file system {@code target_fs}. */
@Namespace("tensorflow") public static native @ByVal Status FileSystemCopyFile(FileSystem src_fs, @StdString BytePointer src,
                                FileSystem target_fs, @StdString BytePointer target);
@Namespace("tensorflow") public static native @ByVal Status FileSystemCopyFile(FileSystem src_fs, @StdString String src,
                                FileSystem target_fs, @StdString String target);

/** A utility routine: reads contents of named file into {@code *data} */
@Namespace("tensorflow") public static native @ByVal Status ReadFileToString(Env env, @StdString BytePointer fname, @StdString @Cast({"char*", "std::string*"}) BytePointer data);
@Namespace("tensorflow") public static native @ByVal Status ReadFileToString(Env env, @StdString String fname, @StdString @Cast({"char*", "std::string*"}) BytePointer data);

/** A utility routine: write contents of {@code data} to file named {@code fname}
 *  (overwriting existing contents, if any). */
@Namespace("tensorflow") public static native @ByVal Status WriteStringToFile(Env env, @StdString BytePointer fname,
                               @StringPiece BytePointer data);
@Namespace("tensorflow") public static native @ByVal Status WriteStringToFile(Env env, @StdString String fname,
                               @StringPiece String data);

/** Write binary representation of "proto" to the named file. */
@Namespace("tensorflow") public static native @ByVal Status WriteBinaryProto(Env env, @StdString BytePointer fname,
                              @Cast("const tensorflow::protobuf::MessageLite*") @ByRef MessageLite proto);
@Namespace("tensorflow") public static native @ByVal Status WriteBinaryProto(Env env, @StdString String fname,
                              @Cast("const tensorflow::protobuf::MessageLite*") @ByRef MessageLite proto);

/** Reads contents of named file and parse as binary encoded proto data
 *  and store into {@code *proto}. */
@Namespace("tensorflow") public static native @ByVal Status ReadBinaryProto(Env env, @StdString BytePointer fname,
                             @Cast("tensorflow::protobuf::MessageLite*") MessageLite proto);
@Namespace("tensorflow") public static native @ByVal Status ReadBinaryProto(Env env, @StdString String fname,
                             @Cast("tensorflow::protobuf::MessageLite*") MessageLite proto);

/** Write the text representation of "proto" to the named file. */
@Namespace("tensorflow") public static native @ByVal Status WriteTextProto(Env env, @StdString BytePointer fname,
                            @Cast("const tensorflow::protobuf::Message*") @ByRef MessageLite proto);
@Namespace("tensorflow") public static native @ByVal Status WriteTextProto(Env env, @StdString String fname,
                            @Cast("const tensorflow::protobuf::Message*") @ByRef MessageLite proto);

/** Read contents of named file and parse as text encoded proto data
 *  and store into {@code *proto}. */
@Namespace("tensorflow") public static native @ByVal Status ReadTextProto(Env env, @StdString BytePointer fname,
                           @Cast("tensorflow::protobuf::Message*") MessageLite proto);
@Namespace("tensorflow") public static native @ByVal Status ReadTextProto(Env env, @StdString String fname,
                           @Cast("tensorflow::protobuf::Message*") MessageLite proto);
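
// A minimal usage sketch (not part of the generated bindings): round-tripping a
// protocol buffer through WriteBinaryProto/ReadBinaryProto above. The file name
// is an illustrative assumption; BytesList (declared in the next section)
// stands in for any MessageLite subclass such as GraphDef.
public static void protoIoSketch() {
    Env env = Env.Default();
    BytesList original = new BytesList();
    original.add_value("hello");  // accessor generated from feature.proto
    Status w = WriteBinaryProto(env, "/tmp/bytes.pb", original);  // hypothetical path
    BytesList restored = new BytesList();
    Status r = ReadBinaryProto(env, "/tmp/bytes.pb", restored);
    System.out.println("wrote: " + w.ok() + ", read ok: " + r.ok()
            + ", values: " + restored.value_size());
}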
// START_SKIP_DOXYGEN
  // namespace register_file_system
// END_SKIP_DOXYGEN

  // namespace tensorflow

// Register a FileSystem implementation for a scheme. Files with names that have
// "scheme://" prefixes are routed to use this implementation.
// #define REGISTER_FILE_SYSTEM_ENV(env, scheme, factory)
//   REGISTER_FILE_SYSTEM_UNIQ_HELPER(__COUNTER__, env, scheme, factory)

// #define REGISTER_FILE_SYSTEM_UNIQ_HELPER(ctr, env, scheme, factory)
//   REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory)

// #define REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory)
//   static ::tensorflow::register_file_system::Register
//       register_ff##ctr TF_ATTRIBUTE_UNUSED =
//           ::tensorflow::register_file_system::Register(env, scheme)

// #define REGISTER_FILE_SYSTEM(scheme, factory)
//   REGISTER_FILE_SYSTEM_ENV(::tensorflow::Env::Default(), scheme, factory);

// #endif  // TENSORFLOW_CORE_PLATFORM_ENV_H_


// Parsed from tensorflow/core/example/feature.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/example/feature.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fexample_2ffeature_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fexample_2ffeature_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include
// #include
// #include
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fexample_2ffeature_2eproto

// Internal implementation detail -- do not use these members.
// namespace protobuf_tensorflow_2fcore_2fexample_2ffeature_2eproto @Namespace("tensorflow") @Opaque public static class FeatureLists_FeatureListEntry_DoNotUse extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public FeatureLists_FeatureListEntry_DoNotUse() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureLists_FeatureListEntry_DoNotUse(Pointer p) { super(p); } } @Namespace("tensorflow") @Opaque public static class Features_FeatureEntry_DoNotUse extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public Features_FeatureEntry_DoNotUse() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Features_FeatureEntry_DoNotUse(Pointer p) { super(p); } } // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class BytesList extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BytesList(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public BytesList(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public BytesList position(long position) { return (BytesList)super.position(position); } public BytesList() { super((Pointer)null); allocate(); } private native void allocate(); public BytesList(@Const @ByRef BytesList from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef BytesList from); public native @ByRef @Name("operator =") BytesList put(@Const @ByRef BytesList from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef BytesList default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const BytesList internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(BytesList other); public native void Swap(BytesList other); // implements Message ---------------------------------------------- public native BytesList New(); public native BytesList New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef BytesList from); public native void MergeFrom(@Const @ByRef BytesList from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, 
@Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated bytes value = 1; public native int value_size(); public native void clear_value(); @MemberGetter public static native int kValueFieldNumber(); public static final int kValueFieldNumber = kValueFieldNumber(); public native @StdString BytePointer value(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_value(int index); public native void set_value(int index, @StdString BytePointer value); public native void set_value(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_value(int index, @Const Pointer value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_value(); public native void add_value(@StdString BytePointer value); public native void add_value(@StdString String value); // #if LANG_CXX11 // #endif public native void add_value(@Const Pointer value, @Cast("size_t") long size); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class FloatList extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FloatList(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public FloatList(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public FloatList position(long position) { return (FloatList)super.position(position); } public FloatList() { super((Pointer)null); allocate(); } private native void allocate(); public FloatList(@Const @ByRef FloatList from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef FloatList from); public native @ByRef @Name("operator =") FloatList put(@Const @ByRef FloatList from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef FloatList default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const FloatList internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(FloatList other); public native void Swap(FloatList other); // implements Message ---------------------------------------------- public native FloatList New(); public native FloatList New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef FloatList from); public native void MergeFrom(@Const @ByRef FloatList from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated float value = 1 [packed = true]; public native int value_size(); public native void clear_value(); @MemberGetter public static native int kValueFieldNumber(); public static final int kValueFieldNumber = kValueFieldNumber(); public native float value(int index); public native void set_value(int index, float value); public native void add_value(float value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class Int64List extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Int64List(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public Int64List(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Int64List position(long position) { return (Int64List)super.position(position); } public Int64List() { super((Pointer)null); allocate(); } private native void allocate(); public Int64List(@Const @ByRef Int64List from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef Int64List from); public native @ByRef @Name("operator =") Int64List put(@Const @ByRef Int64List from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef Int64List default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const Int64List internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(Int64List other); public native void Swap(Int64List other); // implements Message ---------------------------------------------- public native Int64List New(); public native Int64List New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef Int64List from); public native void MergeFrom(@Const @ByRef Int64List from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated int64 value = 1 [packed = true]; public native int value_size(); public native void clear_value(); @MemberGetter public static native int kValueFieldNumber(); public static final int kValueFieldNumber = kValueFieldNumber(); public native @Cast("google::protobuf::int64") long value(int index); public native void set_value(int index, @Cast("google::protobuf::int64") long value); public native void add_value(@Cast("google::protobuf::int64") long value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class Feature extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Feature(Pointer p) { super(p); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public Feature(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Feature position(long position) { return (Feature)super.position(position); } public Feature() { super((Pointer)null); allocate(); } private native void allocate(); public Feature(@Const @ByRef Feature from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef Feature from); public native @ByRef @Name("operator =") Feature put(@Const @ByRef Feature from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef Feature default_instance(); /** enum tensorflow::Feature::KindCase */ public static final int kBytesList = 1, kFloatList = 2, kInt64List = 3, KIND_NOT_SET = 0; public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const Feature internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(Feature other); public native void Swap(Feature other); // implements Message ---------------------------------------------- public native Feature New(); public native Feature New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef Feature from); public native void MergeFrom(@Const @ByRef Feature from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.BytesList bytes_list = 1; public native @Cast("bool") boolean has_bytes_list(); public native void clear_bytes_list(); @MemberGetter public static native int kBytesListFieldNumber(); public static final int kBytesListFieldNumber = kBytesListFieldNumber(); public native @Const @ByRef BytesList bytes_list(); public native BytesList release_bytes_list(); public native BytesList mutable_bytes_list(); public native void set_allocated_bytes_list(BytesList bytes_list); public native void unsafe_arena_set_allocated_bytes_list( BytesList bytes_list); public native BytesList unsafe_arena_release_bytes_list(); // .tensorflow.FloatList float_list = 2; 
public native @Cast("bool") boolean has_float_list(); public native void clear_float_list(); @MemberGetter public static native int kFloatListFieldNumber(); public static final int kFloatListFieldNumber = kFloatListFieldNumber(); public native @Const @ByRef FloatList float_list(); public native FloatList release_float_list(); public native FloatList mutable_float_list(); public native void set_allocated_float_list(FloatList float_list); public native void unsafe_arena_set_allocated_float_list( FloatList float_list); public native FloatList unsafe_arena_release_float_list(); // .tensorflow.Int64List int64_list = 3; public native @Cast("bool") boolean has_int64_list(); public native void clear_int64_list(); @MemberGetter public static native int kInt64ListFieldNumber(); public static final int kInt64ListFieldNumber = kInt64ListFieldNumber(); public native @Const @ByRef Int64List int64_list(); public native Int64List release_int64_list(); public native Int64List mutable_int64_list(); public native void set_allocated_int64_list(Int64List int64_list); public native void unsafe_arena_set_allocated_int64_list( Int64List int64_list); public native Int64List unsafe_arena_release_int64_list(); public native void clear_kind(); public native @Cast("tensorflow::Feature::KindCase") int kind_case(); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class Features extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Features(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Features(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Features position(long position) { return (Features)super.position(position); } public Features() { super((Pointer)null); allocate(); } private native void allocate(); public Features(@Const @ByRef Features from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef Features from); public native @ByRef @Name("operator =") Features put(@Const @ByRef Features from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef Features default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const Features internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(Features other); public native void Swap(Features other); // implements Message ---------------------------------------------- public native Features New(); public native Features New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef Features from); public native void MergeFrom(@Const @ByRef Features from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean 
MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // map feature = 1; public native int feature_size(); public native void clear_feature(); @MemberGetter public static native int kFeatureFieldNumber(); public static final int kFeatureFieldNumber = kFeatureFieldNumber(); public native @Const @ByRef StringFeatureMap feature(); public native StringFeatureMap mutable_feature(); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class FeatureList extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureList(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public FeatureList(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public FeatureList position(long position) { return (FeatureList)super.position(position); } public FeatureList() { super((Pointer)null); allocate(); } private native void allocate(); public FeatureList(@Const @ByRef FeatureList from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef FeatureList from); public native @ByRef @Name("operator =") FeatureList put(@Const @ByRef FeatureList from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef FeatureList default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const FeatureList internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(FeatureList other); public native void Swap(FeatureList other); // implements Message ---------------------------------------------- public native FeatureList New(); public native FeatureList New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef FeatureList from); public native void MergeFrom(@Const @ByRef FeatureList from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native 
void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.Feature feature = 1; public native int feature_size(); public native void clear_feature(); @MemberGetter public static native int kFeatureFieldNumber(); public static final int kFeatureFieldNumber = kFeatureFieldNumber(); public native Feature mutable_feature(int index); public native @Const @ByRef Feature feature(int index); public native Feature add_feature(); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class FeatureLists extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureLists(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public FeatureLists(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public FeatureLists position(long position) { return (FeatureLists)super.position(position); } public FeatureLists() { super((Pointer)null); allocate(); } private native void allocate(); public FeatureLists(@Const @ByRef FeatureLists from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef FeatureLists from); public native @ByRef @Name("operator =") FeatureLists put(@Const @ByRef FeatureLists from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef FeatureLists default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const FeatureLists internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(FeatureLists other); public native void Swap(FeatureLists other); // implements Message ---------------------------------------------- public native FeatureLists New(); public native FeatureLists New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef FeatureLists from); public native void MergeFrom(@Const @ByRef FeatureLists from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native 
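// --- Editorial usage sketch (not generated by JavaCPP): FeatureList, declared
// just above, is a plain repeated field, so elements are appended with
// add_feature(); mutable_float_list()/add_value(float) follow the usual
// protobuf accessor conventions and are assumptions here.
//
//   FeatureList steps = new FeatureList();
//   for (int i = 0; i < 3; i++) {
//     Feature f = steps.add_feature();         // returns the new mutable element
//     f.mutable_float_list().add_value(0.5f);  // assumed accessor
//   }
//   // steps.feature_size() == 3; steps.feature(0) reads an element back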
@Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // map feature_list = 1; public native int feature_list_size(); public native void clear_feature_list(); @MemberGetter public static native int kFeatureListFieldNumber(); public static final int kFeatureListFieldNumber = kFeatureListFieldNumber(); public native @Const @ByRef StringFeatureListMap feature_list(); public native StringFeatureListMap mutable_feature_list(); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // BytesList // repeated bytes value = 1; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // FloatList // repeated float value = 1 [packed = true]; // ------------------------------------------------------------------- // Int64List // repeated int64 value = 1 [packed = true]; // ------------------------------------------------------------------- // Feature // .tensorflow.BytesList bytes_list = 1; // .tensorflow.FloatList float_list = 2; // .tensorflow.Int64List int64_list = 3; // ------------------------------------------------------------------- // ------------------------------------------------------------------- // Features // map feature = 1; // ------------------------------------------------------------------- // FeatureList // repeated .tensorflow.Feature feature = 1; // ------------------------------------------------------------------- // ------------------------------------------------------------------- // FeatureLists // map feature_list = 1; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fexample_2ffeature_2eproto // Parsed from tensorflow/core/example/example.pb.h // Generated by 
the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/example/example.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fexample_2fexample_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fexample_2fexample_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include "tensorflow/core/example/feature.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fexample_2fexample_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fexample_2fexample_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class Example extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Example(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Example(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Example position(long position) { return (Example)super.position(position); } public Example() { super((Pointer)null); allocate(); } private native void allocate(); public Example(@Const @ByRef Example from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef Example from); public native @ByRef @Name("operator =") Example put(@Const @ByRef Example from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef Example default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const Example internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(Example other); public native void Swap(Example other); // implements Message ---------------------------------------------- public native Example New(); public native Example New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef Example from); public native void MergeFrom(@Const @ByRef Example from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream 
output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.Features features = 1; public native @Cast("bool") boolean has_features(); public native void clear_features(); @MemberGetter public static native int kFeaturesFieldNumber(); public static final int kFeaturesFieldNumber = kFeaturesFieldNumber(); public native @Const @ByRef Features features(); public native Features release_features(); public native Features mutable_features(); public native void set_allocated_features(Features features); public native void unsafe_arena_set_allocated_features( Features features); public native Features unsafe_arena_release_features(); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class SequenceExample extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SequenceExample(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public SequenceExample(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public SequenceExample position(long position) { return (SequenceExample)super.position(position); } public SequenceExample() { super((Pointer)null); allocate(); } private native void allocate(); public SequenceExample(@Const @ByRef SequenceExample from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef SequenceExample from); public native @ByRef @Name("operator =") SequenceExample put(@Const @ByRef SequenceExample from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef SequenceExample default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const SequenceExample internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(SequenceExample other); public native void Swap(SequenceExample other); // implements Message ---------------------------------------------- public native SequenceExample New(); public native SequenceExample New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef SequenceExample from); public native void MergeFrom(@Const @ByRef SequenceExample from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.Features context = 1; public native @Cast("bool") boolean has_context(); public native void clear_context(); @MemberGetter public static native int kContextFieldNumber(); public static final int kContextFieldNumber = kContextFieldNumber(); public native @Const @ByRef Features context(); public native Features release_context(); public native Features mutable_context(); public native void set_allocated_context(Features context); public native void unsafe_arena_set_allocated_context( Features context); public native Features unsafe_arena_release_context(); // .tensorflow.FeatureLists feature_lists = 2; public native @Cast("bool") boolean has_feature_lists(); public native void 
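// --- Editorial usage sketch (not generated by JavaCPP): wiring a SequenceExample
// through the singular-message accessors declared in this class. As in the C++
// protobuf API, mutable_context()/mutable_feature_lists() create the sub-message
// on first use, which is what flips has_context()/has_feature_lists() to true.
//
//   SequenceExample seq = new SequenceExample();
//   seq.mutable_context();                 // creates the context Features
//   seq.mutable_feature_lists();           // creates the FeatureLists
//   boolean ready = seq.has_context() && seq.has_feature_lists(); // true
//   long wireSize = seq.ByteSizeLong();    // size of the encoded message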
clear_feature_lists(); @MemberGetter public static native int kFeatureListsFieldNumber(); public static final int kFeatureListsFieldNumber = kFeatureListsFieldNumber(); public native @Const @ByRef FeatureLists feature_lists(); public native FeatureLists release_feature_lists(); public native FeatureLists mutable_feature_lists(); public native void set_allocated_feature_lists(FeatureLists feature_lists); public native void unsafe_arena_set_allocated_feature_lists( FeatureLists feature_lists); public native FeatureLists unsafe_arena_release_feature_lists(); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // Example // .tensorflow.Features features = 1; // ------------------------------------------------------------------- // SequenceExample // .tensorflow.Features context = 1; // .tensorflow.FeatureLists feature_lists = 2; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fexample_2fexample_2eproto // Parsed from tensorflow/core/protobuf/debug.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/protobuf/debug.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class DebugTensorWatch extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DebugTensorWatch(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public DebugTensorWatch(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public DebugTensorWatch position(long position) { return (DebugTensorWatch)super.position(position); } public DebugTensorWatch() { super((Pointer)null); allocate(); } private native void allocate(); public DebugTensorWatch(@Const @ByRef DebugTensorWatch from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef DebugTensorWatch from); public native @ByRef @Name("operator =") DebugTensorWatch put(@Const @ByRef DebugTensorWatch from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef DebugTensorWatch default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const DebugTensorWatch internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(DebugTensorWatch other); public native void Swap(DebugTensorWatch other); // implements Message ---------------------------------------------- public native DebugTensorWatch New(); public native DebugTensorWatch New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef DebugTensorWatch from); public native void MergeFrom(@Const @ByRef DebugTensorWatch from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated string debug_ops = 3; public native int debug_ops_size(); public native void clear_debug_ops(); @MemberGetter public static native int kDebugOpsFieldNumber(); public static final int kDebugOpsFieldNumber = kDebugOpsFieldNumber(); public native @StdString BytePointer debug_ops(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_debug_ops(int index); public native void set_debug_ops(int index, @StdString BytePointer value); public native void set_debug_ops(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_debug_ops(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size); 
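// --- Editorial usage sketch (not generated by JavaCPP): a DebugTensorWatch on
// one output slot of a node, using the accessors declared in this class. The
// node name, debug op, and URL are illustrative values only.
//
//   DebugTensorWatch watch = new DebugTensorWatch();
//   watch.set_node_name("dense/BiasAdd");           // hypothetical node
//   watch.set_output_slot(0);
//   watch.add_debug_ops("DebugIdentity");
//   watch.add_debug_urls("grpc://localhost:6064");  // hypothetical debug server
//   watch.set_tolerate_debug_op_creation_failures(true);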
public native void set_debug_ops(int index, String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_debug_ops(); public native void add_debug_ops(@StdString BytePointer value); public native void add_debug_ops(@StdString String value); // #if LANG_CXX11 // #endif public native void add_debug_ops(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void add_debug_ops(String value, @Cast("size_t") long size); // repeated string debug_urls = 4; public native int debug_urls_size(); public native void clear_debug_urls(); @MemberGetter public static native int kDebugUrlsFieldNumber(); public static final int kDebugUrlsFieldNumber = kDebugUrlsFieldNumber(); public native @StdString BytePointer debug_urls(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_debug_urls(int index); public native void set_debug_urls(int index, @StdString BytePointer value); public native void set_debug_urls(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_debug_urls(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_debug_urls(int index, String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_debug_urls(); public native void add_debug_urls(@StdString BytePointer value); public native void add_debug_urls(@StdString String value); // #if LANG_CXX11 // #endif public native void add_debug_urls(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void add_debug_urls(String value, @Cast("size_t") long size); // string node_name = 1; public native void clear_node_name(); @MemberGetter public static native int kNodeNameFieldNumber(); public static final int kNodeNameFieldNumber = kNodeNameFieldNumber(); public native @StdString BytePointer node_name(); public native void set_node_name(@StdString BytePointer value); public native void set_node_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_node_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_node_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_node_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_node_name(); public native void set_allocated_node_name(@StdString @Cast({"char*", "std::string*"}) BytePointer node_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_node_name(); public native @Deprecated void unsafe_arena_set_allocated_node_name( @StdString @Cast({"char*", "std::string*"}) BytePointer node_name); // int32 output_slot = 2; public native void clear_output_slot(); @MemberGetter public static native int kOutputSlotFieldNumber(); public static final int kOutputSlotFieldNumber = kOutputSlotFieldNumber(); public native @Cast("google::protobuf::int32") int output_slot(); public native void set_output_slot(@Cast("google::protobuf::int32") int value); // bool tolerate_debug_op_creation_failures = 5; public native void clear_tolerate_debug_op_creation_failures(); @MemberGetter public static native int kTolerateDebugOpCreationFailuresFieldNumber(); public static final int kTolerateDebugOpCreationFailuresFieldNumber = kTolerateDebugOpCreationFailuresFieldNumber(); public native @Cast("bool") boolean 
tolerate_debug_op_creation_failures(); public native void set_tolerate_debug_op_creation_failures(@Cast("bool") boolean value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class DebugOptions extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DebugOptions(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public DebugOptions(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public DebugOptions position(long position) { return (DebugOptions)super.position(position); } public DebugOptions() { super((Pointer)null); allocate(); } private native void allocate(); public DebugOptions(@Const @ByRef DebugOptions from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef DebugOptions from); public native @ByRef @Name("operator =") DebugOptions put(@Const @ByRef DebugOptions from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef DebugOptions default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const DebugOptions internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(DebugOptions other); public native void Swap(DebugOptions other); // implements Message ---------------------------------------------- public native DebugOptions New(); public native DebugOptions New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef DebugOptions from); public native void MergeFrom(@Const @ByRef DebugOptions from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.DebugTensorWatch debug_tensor_watch_opts = 4; public native int debug_tensor_watch_opts_size(); public native void clear_debug_tensor_watch_opts(); @MemberGetter public static native int kDebugTensorWatchOptsFieldNumber(); 
public static final int kDebugTensorWatchOptsFieldNumber = kDebugTensorWatchOptsFieldNumber(); public native DebugTensorWatch mutable_debug_tensor_watch_opts(int index); public native @Const @ByRef DebugTensorWatch debug_tensor_watch_opts(int index); public native DebugTensorWatch add_debug_tensor_watch_opts(); // bool reset_disk_byte_usage = 11; public native void clear_reset_disk_byte_usage(); @MemberGetter public static native int kResetDiskByteUsageFieldNumber(); public static final int kResetDiskByteUsageFieldNumber = kResetDiskByteUsageFieldNumber(); public native @Cast("bool") boolean reset_disk_byte_usage(); public native void set_reset_disk_byte_usage(@Cast("bool") boolean value); // int64 global_step = 10; public native void clear_global_step(); @MemberGetter public static native int kGlobalStepFieldNumber(); public static final int kGlobalStepFieldNumber = kGlobalStepFieldNumber(); public native @Cast("google::protobuf::int64") long global_step(); public native void set_global_step(@Cast("google::protobuf::int64") long value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class DebuggedSourceFile extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DebuggedSourceFile(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public DebuggedSourceFile(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public DebuggedSourceFile position(long position) { return (DebuggedSourceFile)super.position(position); } public DebuggedSourceFile() { super((Pointer)null); allocate(); } private native void allocate(); public DebuggedSourceFile(@Const @ByRef DebuggedSourceFile from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef DebuggedSourceFile from); public native @ByRef @Name("operator =") DebuggedSourceFile put(@Const @ByRef DebuggedSourceFile from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef DebuggedSourceFile default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const DebuggedSourceFile internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(DebuggedSourceFile other); public native void Swap(DebuggedSourceFile other); // implements Message ---------------------------------------------- public native DebuggedSourceFile New(); public native DebuggedSourceFile New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef DebuggedSourceFile from); public native void MergeFrom(@Const @ByRef DebuggedSourceFile from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native 
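// --- Editorial usage sketch (not generated by JavaCPP): DebugOptions, declared
// above, owns a repeated list of DebugTensorWatch entries, so
// add_debug_tensor_watch_opts() returns a new mutable element owned by the
// options message. All values below are placeholders.
//
//   DebugOptions opts = new DebugOptions();
//   DebugTensorWatch w = opts.add_debug_tensor_watch_opts();
//   w.set_node_name("dense/BiasAdd");        // hypothetical node
//   w.add_debug_ops("DebugNumericSummary");  // illustrative debug op
//   opts.set_global_step(100);
//   opts.set_reset_disk_byte_usage(true);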
@Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated string lines = 5; public native int lines_size(); public native void clear_lines(); @MemberGetter public static native int kLinesFieldNumber(); public static final int kLinesFieldNumber = kLinesFieldNumber(); public native @StdString BytePointer lines(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_lines(int index); public native void set_lines(int index, @StdString BytePointer value); public native void set_lines(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_lines(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_lines(int index, String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_lines(); public native void add_lines(@StdString BytePointer value); public native void add_lines(@StdString String value); // #if LANG_CXX11 // #endif public native void add_lines(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void add_lines(String value, @Cast("size_t") long size); // string host = 1; public native void clear_host(); @MemberGetter public static native int kHostFieldNumber(); public static final int kHostFieldNumber = kHostFieldNumber(); public native @StdString BytePointer host(); public native void set_host(@StdString BytePointer value); public native void set_host(@StdString String value); // #if LANG_CXX11 // #endif public native void set_host(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_host(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_host(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_host(); public native void set_allocated_host(@StdString @Cast({"char*", "std::string*"}) BytePointer host); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_host(); public native @Deprecated void unsafe_arena_set_allocated_host( @StdString @Cast({"char*", "std::string*"}) BytePointer host); // string file_path = 2; public native void clear_file_path(); @MemberGetter public static native int kFilePathFieldNumber(); public static final int kFilePathFieldNumber = kFilePathFieldNumber(); public native @StdString BytePointer file_path(); public native void set_file_path(@StdString BytePointer value); public native void set_file_path(@StdString String value); // #if LANG_CXX11 // #endif public native void set_file_path(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_file_path(String value, @Cast("size_t") long size); public native 
@StdString @Cast({"char*", "std::string*"}) BytePointer mutable_file_path(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_file_path(); public native void set_allocated_file_path(@StdString @Cast({"char*", "std::string*"}) BytePointer file_path); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_file_path(); public native @Deprecated void unsafe_arena_set_allocated_file_path( @StdString @Cast({"char*", "std::string*"}) BytePointer file_path); // int64 last_modified = 3; public native void clear_last_modified(); @MemberGetter public static native int kLastModifiedFieldNumber(); public static final int kLastModifiedFieldNumber = kLastModifiedFieldNumber(); public native @Cast("google::protobuf::int64") long last_modified(); public native void set_last_modified(@Cast("google::protobuf::int64") long value); // int64 bytes = 4; public native void clear_bytes(); @MemberGetter public static native int kBytesFieldNumber(); public static final int kBytesFieldNumber = kBytesFieldNumber(); public native @Cast("google::protobuf::int64") long bytes(); public native void set_bytes(@Cast("google::protobuf::int64") long value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class DebuggedSourceFiles extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DebuggedSourceFiles(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public DebuggedSourceFiles(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public DebuggedSourceFiles position(long position) { return (DebuggedSourceFiles)super.position(position); } public DebuggedSourceFiles() { super((Pointer)null); allocate(); } private native void allocate(); public DebuggedSourceFiles(@Const @ByRef DebuggedSourceFiles from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef DebuggedSourceFiles from); public native @ByRef @Name("operator =") DebuggedSourceFiles put(@Const @ByRef DebuggedSourceFiles from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef DebuggedSourceFiles default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const DebuggedSourceFiles internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(DebuggedSourceFiles other); public native void Swap(DebuggedSourceFiles other); // implements Message ---------------------------------------------- public native DebuggedSourceFiles New(); public native DebuggedSourceFiles New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef DebuggedSourceFiles from); public native void MergeFrom(@Const @ByRef DebuggedSourceFiles from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public 
native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.DebuggedSourceFile source_files = 1; public native int source_files_size(); public native void clear_source_files(); @MemberGetter public static native int kSourceFilesFieldNumber(); public static final int kSourceFilesFieldNumber = kSourceFilesFieldNumber(); public native DebuggedSourceFile mutable_source_files(int index); public native @Const @ByRef DebuggedSourceFile source_files(int index); public native DebuggedSourceFile add_source_files(); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // DebugTensorWatch // string node_name = 1; // #if LANG_CXX11 // #endif // int32 output_slot = 2; // repeated string debug_ops = 3; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // repeated string debug_urls = 4; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // bool tolerate_debug_op_creation_failures = 5; // ------------------------------------------------------------------- // DebugOptions // repeated .tensorflow.DebugTensorWatch debug_tensor_watch_opts = 4; // int64 global_step = 10; // bool reset_disk_byte_usage = 11; // ------------------------------------------------------------------- // DebuggedSourceFile // string host = 1; // #if LANG_CXX11 // #endif // string file_path = 2; // #if LANG_CXX11 // #endif // int64 last_modified = 3; // int64 bytes = 4; // repeated string lines = 5; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // DebuggedSourceFiles // repeated .tensorflow.DebuggedSourceFile source_files = 1; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto // Parsed from tensorflow/core/protobuf/cluster.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: tensorflow/core/protobuf/cluster.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto @Namespace("tensorflow") @Opaque public static class JobDef_TasksEntry_DoNotUse extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public JobDef_TasksEntry_DoNotUse() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public JobDef_TasksEntry_DoNotUse(Pointer p) { super(p); } } // namespace tensorflow // namespace protobuf // namespace google // =================================================================== // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class JobDef extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public JobDef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public JobDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public JobDef position(long position) { return (JobDef)super.position(position); } public JobDef() { super((Pointer)null); allocate(); } private native void allocate(); public JobDef(@Const @ByRef JobDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef JobDef from); public native @ByRef @Name("operator =") JobDef put(@Const @ByRef JobDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef JobDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const JobDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(JobDef other); public native void Swap(JobDef other); // implements Message ---------------------------------------------- public native JobDef New(); public native JobDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef JobDef from); public native void MergeFrom(@Const @ByRef JobDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // map<int32, string> tasks = 2; public native int tasks_size(); public native void clear_tasks(); @MemberGetter public static native int kTasksFieldNumber(); public static final int kTasksFieldNumber = kTasksFieldNumber(); public native @Const @ByRef IntStringMap tasks(); public native IntStringMap mutable_tasks(); // string name = 1; public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString 
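// --- Editorial usage sketch (not generated by JavaCPP): a two-task "worker" job
// inside a ClusterDef (ClusterDef.add_job() is declared further below). The
// IntStringMap put(int, BytePointer) signature is an assumption based on the
// other generated map adapters in this file; the addresses are placeholders.
//
//   ClusterDef cluster = new ClusterDef();
//   JobDef job = cluster.add_job();
//   job.set_name("worker");
//   job.mutable_tasks().put(0, new BytePointer("localhost:2222")); // assumed put()
//   job.mutable_tasks().put(1, new BytePointer("localhost:2223")); // assumed put()
//   // cluster.job(0).tasks_size() == 2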
@Cast({"char*", "std::string*"}) BytePointer mutable_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( @StdString @Cast({"char*", "std::string*"}) BytePointer name); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class ClusterDef extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ClusterDef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public ClusterDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public ClusterDef position(long position) { return (ClusterDef)super.position(position); } public ClusterDef() { super((Pointer)null); allocate(); } private native void allocate(); public ClusterDef(@Const @ByRef ClusterDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef ClusterDef from); public native @ByRef @Name("operator =") ClusterDef put(@Const @ByRef ClusterDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef ClusterDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const ClusterDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(ClusterDef other); public native void Swap(ClusterDef other); // implements Message ---------------------------------------------- public native ClusterDef New(); public native ClusterDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef ClusterDef from); public native void MergeFrom(@Const @ByRef ClusterDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types 
---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.JobDef job = 1; public native int job_size(); public native void clear_job(); @MemberGetter public static native int kJobFieldNumber(); public static final int kJobFieldNumber = kJobFieldNumber(); public native JobDef mutable_job(int index); public native @Const @ByRef JobDef job(int index); public native JobDef add_job(); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // ------------------------------------------------------------------- // JobDef // string name = 1; // #if LANG_CXX11 // #endif // map<int32, string> tasks = 2; // ------------------------------------------------------------------- // ClusterDef // repeated .tensorflow.JobDef job = 1; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto // Parsed from tensorflow/core/protobuf/rewriter_config.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/protobuf/rewriter_config.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include // #include // #include // #include "tensorflow/core/framework/attr_value.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto @Namespace("tensorflow") @Opaque public static class RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public RewriterConfig_CustomGraphOptimizer_ParameterMapEntry_DoNotUse(Pointer p) { super(p); } } // namespace tensorflow // namespace protobuf // namespace google /** enum tensorflow::RewriterConfig_Toggle */ public static final int RewriterConfig_Toggle_DEFAULT = 0, RewriterConfig_Toggle_ON = 1, RewriterConfig_Toggle_OFF = 2, RewriterConfig_Toggle_AGGRESSIVE = 3, RewriterConfig_Toggle_RewriterConfig_Toggle_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min, RewriterConfig_Toggle_RewriterConfig_Toggle_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max; @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_IsValid(int value); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RewriterConfig_Toggle") int RewriterConfig_Toggle_Toggle_MIN(); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RewriterConfig_Toggle") int RewriterConfig_Toggle_Toggle_MAX(); @Namespace("tensorflow") @MemberGetter public static native int RewriterConfig_Toggle_Toggle_ARRAYSIZE(); @Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer RewriterConfig_Toggle_descriptor(); @Namespace("tensorflow") public static native @StdString BytePointer RewriterConfig_Toggle_Name(@Cast("tensorflow::RewriterConfig_Toggle") int value); @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_Parse( @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_Toggle*") IntPointer value); @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_Parse( @StdString String name, @Cast("tensorflow::RewriterConfig_Toggle*") IntBuffer value); @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_Parse( @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_Toggle*") int... value); @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_Parse( @StdString String name, @Cast("tensorflow::RewriterConfig_Toggle*") IntPointer value); @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_Parse( @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_Toggle*") IntBuffer value); @Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_Toggle_Parse( @StdString String name, @Cast("tensorflow::RewriterConfig_Toggle*") int... 
/** enum tensorflow::RewriterConfig_NumIterationsType */
public static final int
    RewriterConfig_NumIterationsType_DEFAULT_NUM_ITERS = 0,
    RewriterConfig_NumIterationsType_ONE = 1,
    RewriterConfig_NumIterationsType_TWO = 2,
    RewriterConfig_NumIterationsType_RewriterConfig_NumIterationsType_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
    RewriterConfig_NumIterationsType_RewriterConfig_NumIterationsType_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RewriterConfig_NumIterationsType") int RewriterConfig_NumIterationsType_NumIterationsType_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RewriterConfig_NumIterationsType") int RewriterConfig_NumIterationsType_NumIterationsType_MAX();
@Namespace("tensorflow") @MemberGetter public static native int RewriterConfig_NumIterationsType_NumIterationsType_ARRAYSIZE();

@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer RewriterConfig_NumIterationsType_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer RewriterConfig_NumIterationsType_Name(@Cast("tensorflow::RewriterConfig_NumIterationsType") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_NumIterationsType*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_Parse(
    @StdString String name, @Cast("tensorflow::RewriterConfig_NumIterationsType*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_NumIterationsType*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_Parse(
    @StdString String name, @Cast("tensorflow::RewriterConfig_NumIterationsType*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_NumIterationsType*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_NumIterationsType_Parse(
    @StdString String name, @Cast("tensorflow::RewriterConfig_NumIterationsType*") int... value);

/** enum tensorflow::RewriterConfig_MemOptType */
public static final int
    RewriterConfig_MemOptType_DEFAULT_MEM_OPT = 0,
    RewriterConfig_MemOptType_NO_MEM_OPT = 1,
    RewriterConfig_MemOptType_MANUAL = 2,
    RewriterConfig_MemOptType_SWAPPING_HEURISTICS = 4,
    RewriterConfig_MemOptType_RECOMPUTATION_HEURISTICS = 5,
    RewriterConfig_MemOptType_SCHEDULING_HEURISTICS = 6,
    RewriterConfig_MemOptType_HEURISTICS = 3,
    RewriterConfig_MemOptType_RewriterConfig_MemOptType_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
    RewriterConfig_MemOptType_RewriterConfig_MemOptType_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RewriterConfig_MemOptType") int RewriterConfig_MemOptType_MemOptType_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RewriterConfig_MemOptType") int RewriterConfig_MemOptType_MemOptType_MAX();
@Namespace("tensorflow") @MemberGetter public static native int RewriterConfig_MemOptType_MemOptType_ARRAYSIZE();

@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer RewriterConfig_MemOptType_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer RewriterConfig_MemOptType_Name(@Cast("tensorflow::RewriterConfig_MemOptType") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_MemOptType*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_Parse(
    @StdString String name, @Cast("tensorflow::RewriterConfig_MemOptType*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_MemOptType*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_Parse(
    @StdString String name, @Cast("tensorflow::RewriterConfig_MemOptType*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RewriterConfig_MemOptType*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RewriterConfig_MemOptType_Parse(
    @StdString String name, @Cast("tensorflow::RewriterConfig_MemOptType*") int... value);

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class AutoParallelOptions extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AutoParallelOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public AutoParallelOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public AutoParallelOptions position(long position) {
        return (AutoParallelOptions)super.position(position);
    }
    public AutoParallelOptions() { super((Pointer)null); allocate(); }
    private native void allocate();
    public AutoParallelOptions(@Const @ByRef AutoParallelOptions from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef AutoParallelOptions from);

    public native @ByRef @Name("operator =") AutoParallelOptions put(@Const @ByRef AutoParallelOptions from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef AutoParallelOptions default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const AutoParallelOptions internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(AutoParallelOptions other);
    public native void Swap(AutoParallelOptions other);

    // implements Message ----------------------------------------------

    public native AutoParallelOptions New();
    public native AutoParallelOptions New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef AutoParallelOptions from);
    public native void MergeFrom(@Const @ByRef AutoParallelOptions from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // bool enable = 1;
    public native void clear_enable();
    @MemberGetter public static native int kEnableFieldNumber();
    public static final int kEnableFieldNumber = kEnableFieldNumber();
    public native @Cast("bool") boolean enable();
    public native void set_enable(@Cast("bool") boolean value);

    // int32 num_replicas = 2;
    public native void clear_num_replicas();
    @MemberGetter public static native int kNumReplicasFieldNumber();
    public static final int kNumReplicasFieldNumber = kNumReplicasFieldNumber();
    public native @Cast("google::protobuf::int32") int num_replicas();
    public native void set_num_replicas(@Cast("google::protobuf::int32") int value);
}
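// Usage sketch (illustrative only, not generated code): AutoParallelOptions is
// a plain protobuf message wrapper, so its fields are set through the
// accessors declared above.
//
//   AutoParallelOptions apo = new AutoParallelOptions();
//   apo.set_enable(true);
//   apo.set_num_replicas(2);
//   // apo.enable() == true, apo.num_replicas() == 2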
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class ScopedAllocatorOptions extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ScopedAllocatorOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ScopedAllocatorOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ScopedAllocatorOptions position(long position) {
        return (ScopedAllocatorOptions)super.position(position);
    }
    public ScopedAllocatorOptions() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ScopedAllocatorOptions(@Const @ByRef ScopedAllocatorOptions from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ScopedAllocatorOptions from);

    public native @ByRef @Name("operator =") ScopedAllocatorOptions put(@Const @ByRef ScopedAllocatorOptions from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ScopedAllocatorOptions default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ScopedAllocatorOptions internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(ScopedAllocatorOptions other);
    public native void Swap(ScopedAllocatorOptions other);

    // implements Message ----------------------------------------------

    public native ScopedAllocatorOptions New();
    public native ScopedAllocatorOptions New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ScopedAllocatorOptions from);
    public native void MergeFrom(@Const @ByRef ScopedAllocatorOptions from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated string enable_op = 1;
    public native int enable_op_size();
    public native void clear_enable_op();
    @MemberGetter public static native int kEnableOpFieldNumber();
    public static final int kEnableOpFieldNumber = kEnableOpFieldNumber();
    public native @StdString BytePointer enable_op(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_enable_op(int index);
    public native void set_enable_op(int index, @StdString BytePointer value);
    public native void set_enable_op(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_enable_op(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_enable_op(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_enable_op();
    public native void add_enable_op(@StdString BytePointer value);
    public native void add_enable_op(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_enable_op(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_enable_op(String value, @Cast("size_t") long size);
}
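// Usage sketch (illustrative only, not generated code): enable_op is a
// repeated string field, so entries are appended with add_enable_op and read
// back by index. The op name below is a hypothetical example.
//
//   ScopedAllocatorOptions sao = new ScopedAllocatorOptions();
//   sao.add_enable_op("CollectiveReduce");
//   // sao.enable_op_size() == 1, sao.enable_op(0) holds "CollectiveReduce"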
// -------------------------------------------------------------------
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class RewriterConfig_CustomGraphOptimizer extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RewriterConfig_CustomGraphOptimizer(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public RewriterConfig_CustomGraphOptimizer(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public RewriterConfig_CustomGraphOptimizer position(long position) {
        return (RewriterConfig_CustomGraphOptimizer)super.position(position);
    }
    public RewriterConfig_CustomGraphOptimizer() { super((Pointer)null); allocate(); }
    private native void allocate();
    public RewriterConfig_CustomGraphOptimizer(@Const @ByRef RewriterConfig_CustomGraphOptimizer from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef RewriterConfig_CustomGraphOptimizer from);

    public native @ByRef @Name("operator =") RewriterConfig_CustomGraphOptimizer put(@Const @ByRef RewriterConfig_CustomGraphOptimizer from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef RewriterConfig_CustomGraphOptimizer default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const RewriterConfig_CustomGraphOptimizer internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(RewriterConfig_CustomGraphOptimizer other);
    public native void Swap(RewriterConfig_CustomGraphOptimizer other);

    // implements Message ----------------------------------------------

    public native RewriterConfig_CustomGraphOptimizer New();
    public native RewriterConfig_CustomGraphOptimizer New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef RewriterConfig_CustomGraphOptimizer from);
    public native void MergeFrom(@Const @ByRef RewriterConfig_CustomGraphOptimizer from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // map<string, .tensorflow.AttrValue> parameter_map = 2;
    public native int parameter_map_size();
    public native void clear_parameter_map();
    @MemberGetter public static native int kParameterMapFieldNumber();
    public static final int kParameterMapFieldNumber = kParameterMapFieldNumber();
    public native @Const @ByRef StringAttrValueMap parameter_map();
    public native StringAttrValueMap mutable_parameter_map();

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(
        @StdString @Cast({"char*", "std::string*"}) BytePointer name);
}
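// Usage sketch (illustrative only, not generated code): a custom Grappler
// optimizer is identified by name, with optimizer-specific parameters stored
// in the AttrValue map. "MyOptimizer" is a hypothetical plugin name.
//
//   RewriterConfig_CustomGraphOptimizer opt = new RewriterConfig_CustomGraphOptimizer();
//   opt.set_name("MyOptimizer");
//   StringAttrValueMap params = opt.mutable_parameter_map();  // map<string, AttrValue>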
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class RewriterConfig extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RewriterConfig(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public RewriterConfig(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public RewriterConfig position(long position) {
        return (RewriterConfig)super.position(position);
    }
    public RewriterConfig() { super((Pointer)null); allocate(); }
    private native void allocate();
    public RewriterConfig(@Const @ByRef RewriterConfig from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef RewriterConfig from);

    public native @ByRef @Name("operator =") RewriterConfig put(@Const @ByRef RewriterConfig from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef RewriterConfig default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const RewriterConfig internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(RewriterConfig other);
    public native void Swap(RewriterConfig other);

    // implements Message ----------------------------------------------

    public native RewriterConfig New();
    public native RewriterConfig New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef RewriterConfig from);
    public native void MergeFrom(@Const @ByRef RewriterConfig from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::Toggle") int DEFAULT();
    public static final int DEFAULT = DEFAULT();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::Toggle") int ON();
    public static final int ON = ON();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::Toggle") int OFF();
    public static final int OFF = OFF();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::Toggle") int AGGRESSIVE();
    public static final int AGGRESSIVE = AGGRESSIVE();
    public static native @Cast("bool") boolean Toggle_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::Toggle") int Toggle_MIN();
    public static final int Toggle_MIN = Toggle_MIN();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::Toggle") int Toggle_MAX();
    public static final int Toggle_MAX = Toggle_MAX();
    @MemberGetter public static native int Toggle_ARRAYSIZE();
    public static final int Toggle_ARRAYSIZE = Toggle_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer Toggle_descriptor();
    public static native @StdString BytePointer Toggle_Name(@Cast("tensorflow::RewriterConfig::Toggle") int value);
    public static native @Cast("bool") boolean Toggle_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::Toggle*") IntPointer value);
    public static native @Cast("bool") boolean Toggle_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::Toggle*") IntBuffer value);
    public static native @Cast("bool") boolean Toggle_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::Toggle*") int... value);
    public static native @Cast("bool") boolean Toggle_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::Toggle*") IntPointer value);
    public static native @Cast("bool") boolean Toggle_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::Toggle*") IntBuffer value);
    public static native @Cast("bool") boolean Toggle_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::Toggle*") int... value);
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::NumIterationsType") int DEFAULT_NUM_ITERS();
    public static final int DEFAULT_NUM_ITERS = DEFAULT_NUM_ITERS();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::NumIterationsType") int ONE();
    public static final int ONE = ONE();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::NumIterationsType") int TWO();
    public static final int TWO = TWO();
    public static native @Cast("bool") boolean NumIterationsType_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::NumIterationsType") int NumIterationsType_MIN();
    public static final int NumIterationsType_MIN = NumIterationsType_MIN();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::NumIterationsType") int NumIterationsType_MAX();
    public static final int NumIterationsType_MAX = NumIterationsType_MAX();
    @MemberGetter public static native int NumIterationsType_ARRAYSIZE();
    public static final int NumIterationsType_ARRAYSIZE = NumIterationsType_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer NumIterationsType_descriptor();
    public static native @StdString BytePointer NumIterationsType_Name(@Cast("tensorflow::RewriterConfig::NumIterationsType") int value);
    public static native @Cast("bool") boolean NumIterationsType_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::NumIterationsType*") IntPointer value);
    public static native @Cast("bool") boolean NumIterationsType_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::NumIterationsType*") IntBuffer value);
    public static native @Cast("bool") boolean NumIterationsType_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::NumIterationsType*") int... value);
    public static native @Cast("bool") boolean NumIterationsType_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::NumIterationsType*") IntPointer value);
    public static native @Cast("bool") boolean NumIterationsType_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::NumIterationsType*") IntBuffer value);
    public static native @Cast("bool") boolean NumIterationsType_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::NumIterationsType*") int... value);
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int DEFAULT_MEM_OPT();
    public static final int DEFAULT_MEM_OPT = DEFAULT_MEM_OPT();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int NO_MEM_OPT();
    public static final int NO_MEM_OPT = NO_MEM_OPT();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int MANUAL();
    public static final int MANUAL = MANUAL();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int SWAPPING_HEURISTICS();
    public static final int SWAPPING_HEURISTICS = SWAPPING_HEURISTICS();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int RECOMPUTATION_HEURISTICS();
    public static final int RECOMPUTATION_HEURISTICS = RECOMPUTATION_HEURISTICS();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int SCHEDULING_HEURISTICS();
    public static final int SCHEDULING_HEURISTICS = SCHEDULING_HEURISTICS();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int HEURISTICS();
    public static final int HEURISTICS = HEURISTICS();
    public static native @Cast("bool") boolean MemOptType_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int MemOptType_MIN();
    public static final int MemOptType_MIN = MemOptType_MIN();
    @MemberGetter public static native @Cast("const tensorflow::RewriterConfig::MemOptType") int MemOptType_MAX();
    public static final int MemOptType_MAX = MemOptType_MAX();
    @MemberGetter public static native int MemOptType_ARRAYSIZE();
    public static final int MemOptType_ARRAYSIZE = MemOptType_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer MemOptType_descriptor();
    public static native @StdString BytePointer MemOptType_Name(@Cast("tensorflow::RewriterConfig::MemOptType") int value);
    public static native @Cast("bool") boolean MemOptType_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::MemOptType*") IntPointer value);
    public static native @Cast("bool") boolean MemOptType_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::MemOptType*") IntBuffer value);
    public static native @Cast("bool") boolean MemOptType_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::MemOptType*") int... value);
    public static native @Cast("bool") boolean MemOptType_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::MemOptType*") IntPointer value);
    public static native @Cast("bool") boolean MemOptType_Parse(@StdString BytePointer name,
        @Cast("tensorflow::RewriterConfig::MemOptType*") IntBuffer value);
    public static native @Cast("bool") boolean MemOptType_Parse(@StdString String name,
        @Cast("tensorflow::RewriterConfig::MemOptType*") int... value);

    // accessors -------------------------------------------------------

    // repeated string optimizers = 100;
    public native int optimizers_size();
    public native void clear_optimizers();
    @MemberGetter public static native int kOptimizersFieldNumber();
    public static final int kOptimizersFieldNumber = kOptimizersFieldNumber();
    public native @StdString BytePointer optimizers(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_optimizers(int index);
    public native void set_optimizers(int index, @StdString BytePointer value);
    public native void set_optimizers(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_optimizers(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_optimizers(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_optimizers();
    public native void add_optimizers(@StdString BytePointer value);
    public native void add_optimizers(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_optimizers(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_optimizers(String value, @Cast("size_t") long size);

    // repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;
    public native int custom_optimizers_size();
    public native void clear_custom_optimizers();
    @MemberGetter public static native int kCustomOptimizersFieldNumber();
    public static final int kCustomOptimizersFieldNumber = kCustomOptimizersFieldNumber();
    public native RewriterConfig_CustomGraphOptimizer mutable_custom_optimizers(int index);
    public native @Const @ByRef RewriterConfig_CustomGraphOptimizer custom_optimizers(int index);
    public native RewriterConfig_CustomGraphOptimizer add_custom_optimizers();

    // string memory_optimizer_target_node_name_scope = 6;
    public native void clear_memory_optimizer_target_node_name_scope();
    @MemberGetter public static native int kMemoryOptimizerTargetNodeNameScopeFieldNumber();
    public static final int kMemoryOptimizerTargetNodeNameScopeFieldNumber = kMemoryOptimizerTargetNodeNameScopeFieldNumber();
    public native @StdString BytePointer memory_optimizer_target_node_name_scope();
    public native void set_memory_optimizer_target_node_name_scope(@StdString BytePointer value);
    public native void set_memory_optimizer_target_node_name_scope(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_memory_optimizer_target_node_name_scope(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_memory_optimizer_target_node_name_scope(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_memory_optimizer_target_node_name_scope();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_memory_optimizer_target_node_name_scope();
    public native void set_allocated_memory_optimizer_target_node_name_scope(@StdString @Cast({"char*", "std::string*"}) BytePointer memory_optimizer_target_node_name_scope);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_memory_optimizer_target_node_name_scope();
    public native @Deprecated void unsafe_arena_set_allocated_memory_optimizer_target_node_name_scope(
        @StdString @Cast({"char*", "std::string*"}) BytePointer memory_optimizer_target_node_name_scope);

    // .tensorflow.AutoParallelOptions auto_parallel = 5;
    public native @Cast("bool") boolean has_auto_parallel();
    public native void clear_auto_parallel();
    @MemberGetter public static native int kAutoParallelFieldNumber();
    public static final int kAutoParallelFieldNumber = kAutoParallelFieldNumber();
    public native @Const @ByRef AutoParallelOptions auto_parallel();
    public native AutoParallelOptions release_auto_parallel();
    public native AutoParallelOptions mutable_auto_parallel();
    public native void set_allocated_auto_parallel(AutoParallelOptions auto_parallel);
    public native void unsafe_arena_set_allocated_auto_parallel(AutoParallelOptions auto_parallel);
    public native AutoParallelOptions unsafe_arena_release_auto_parallel();

    // .tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;
    public native @Cast("bool") boolean has_scoped_allocator_opts();
    public native void clear_scoped_allocator_opts();
    @MemberGetter public static native int kScopedAllocatorOptsFieldNumber();
    public static final int kScopedAllocatorOptsFieldNumber = kScopedAllocatorOptsFieldNumber();
    public native @Const @ByRef ScopedAllocatorOptions scoped_allocator_opts();
    public native ScopedAllocatorOptions release_scoped_allocator_opts();
    public native ScopedAllocatorOptions mutable_scoped_allocator_opts();
    public native void set_allocated_scoped_allocator_opts(ScopedAllocatorOptions scoped_allocator_opts);
    public native void unsafe_arena_set_allocated_scoped_allocator_opts(ScopedAllocatorOptions scoped_allocator_opts);
    public native ScopedAllocatorOptions unsafe_arena_release_scoped_allocator_opts();

    // .tensorflow.RewriterConfig.Toggle layout_optimizer = 1;
    public native void clear_layout_optimizer();
    @MemberGetter public static native int kLayoutOptimizerFieldNumber();
    public static final int kLayoutOptimizerFieldNumber = kLayoutOptimizerFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int layout_optimizer();
    public native void set_layout_optimizer(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.Toggle constant_folding = 3;
    public native void clear_constant_folding();
    @MemberGetter public static native int kConstantFoldingFieldNumber();
    public static final int kConstantFoldingFieldNumber = kConstantFoldingFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int constant_folding();
    public native void set_constant_folding(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.MemOptType memory_optimization = 4;
    public native void clear_memory_optimization();
    @MemberGetter public static native int kMemoryOptimizationFieldNumber();
    public static final int kMemoryOptimizationFieldNumber = kMemoryOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_MemOptType") int memory_optimization();
    public native void set_memory_optimization(@Cast("tensorflow::RewriterConfig_MemOptType") int value);

    // .tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;
    public native void clear_arithmetic_optimization();
    @MemberGetter public static native int kArithmeticOptimizationFieldNumber();
    public static final int kArithmeticOptimizationFieldNumber = kArithmeticOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int arithmetic_optimization();
    public native void set_arithmetic_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.Toggle dependency_optimization = 8;
    public native void clear_dependency_optimization();
    @MemberGetter public static native int kDependencyOptimizationFieldNumber();
    public static final int kDependencyOptimizationFieldNumber = kDependencyOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int dependency_optimization();
    public native void set_dependency_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.Toggle loop_optimization = 9;
    public native void clear_loop_optimization();
    @MemberGetter public static native int kLoopOptimizationFieldNumber();
    public static final int kLoopOptimizationFieldNumber = kLoopOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int loop_optimization();
    public native void set_loop_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.Toggle function_optimization = 10;
    public native void clear_function_optimization();
    @MemberGetter public static native int kFunctionOptimizationFieldNumber();
    public static final int kFunctionOptimizationFieldNumber = kFunctionOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int function_optimization();
    public native void set_function_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // bool disable_model_pruning = 2;
    public native void clear_disable_model_pruning();
    @MemberGetter public static native int kDisableModelPruningFieldNumber();
    public static final int kDisableModelPruningFieldNumber = kDisableModelPruningFieldNumber();
    public native @Cast("bool") boolean disable_model_pruning();
    public native void set_disable_model_pruning(@Cast("bool") boolean value);

    // bool disable_meta_optimizer = 19;
    public native void clear_disable_meta_optimizer();
    @MemberGetter public static native int kDisableMetaOptimizerFieldNumber();
    public static final int kDisableMetaOptimizerFieldNumber = kDisableMetaOptimizerFieldNumber();
    public native @Cast("bool") boolean disable_meta_optimizer();
    public native void set_disable_meta_optimizer(@Cast("bool") boolean value);

    // .tensorflow.RewriterConfig.Toggle debug_stripper = 11;
    public native void clear_debug_stripper();
    @MemberGetter public static native int kDebugStripperFieldNumber();
    public static final int kDebugStripperFieldNumber = kDebugStripperFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int debug_stripper();
    public native void set_debug_stripper(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;
    public native void clear_meta_optimizer_iterations();
    @MemberGetter public static native int kMetaOptimizerIterationsFieldNumber();
    public static final int kMetaOptimizerIterationsFieldNumber = kMetaOptimizerIterationsFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_NumIterationsType") int meta_optimizer_iterations();
    public native void set_meta_optimizer_iterations(@Cast("tensorflow::RewriterConfig_NumIterationsType") int value);

    // .tensorflow.RewriterConfig.Toggle shape_optimization = 13;
    public native void clear_shape_optimization();
    @MemberGetter public static native int kShapeOptimizationFieldNumber();
    public static final int kShapeOptimizationFieldNumber = kShapeOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int shape_optimization();
    public native void set_shape_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.Toggle remapping = 14;
    public native void clear_remapping();
    @MemberGetter public static native int kRemappingFieldNumber();
    public static final int kRemappingFieldNumber = kRemappingFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int remapping();
    public native void set_remapping(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // .tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;
    public native void clear_scoped_allocator_optimization();
    @MemberGetter public static native int kScopedAllocatorOptimizationFieldNumber();
    public static final int kScopedAllocatorOptimizationFieldNumber = kScopedAllocatorOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int scoped_allocator_optimization();
    public native void set_scoped_allocator_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);

    // int32 min_graph_nodes = 17;
    public native void clear_min_graph_nodes();
    @MemberGetter public static native int kMinGraphNodesFieldNumber();
    public static final int kMinGraphNodesFieldNumber = kMinGraphNodesFieldNumber();
    public native @Cast("google::protobuf::int32") int min_graph_nodes();
    public native void set_min_graph_nodes(@Cast("google::protobuf::int32") int value);

    // .tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;
    public native void clear_pin_to_host_optimization();
    @MemberGetter public static native int kPinToHostOptimizationFieldNumber();
    public static final int kPinToHostOptimizationFieldNumber = kPinToHostOptimizationFieldNumber();
    public native @Cast("tensorflow::RewriterConfig_Toggle") int pin_to_host_optimization();
    public native void set_pin_to_host_optimization(@Cast("tensorflow::RewriterConfig_Toggle") int value);
}
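// Usage sketch (illustrative only, not generated code): a RewriterConfig
// combines the Toggle, NumIterationsType and MemOptType enums above with the
// per-pass accessors declared in the class.
//
//   RewriterConfig rc = new RewriterConfig();
//   rc.set_constant_folding(RewriterConfig_Toggle_OFF);
//   rc.set_memory_optimization(RewriterConfig_MemOptType_HEURISTICS);
//   rc.set_meta_optimizer_iterations(RewriterConfig_NumIterationsType_TWO);
//   rc.set_min_graph_nodes(4);
//   RewriterConfig_CustomGraphOptimizer custom = rc.add_custom_optimizers();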
// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif // __GNUC__

// AutoParallelOptions

// bool enable = 1;

// int32 num_replicas = 2;

// -------------------------------------------------------------------

// ScopedAllocatorOptions

// repeated string enable_op = 1;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif

// -------------------------------------------------------------------
// -------------------------------------------------------------------

// RewriterConfig_CustomGraphOptimizer

// string name = 1;
// #if LANG_CXX11
// #endif

// map<string, .tensorflow.AttrValue> parameter_map = 2;

// -------------------------------------------------------------------

// RewriterConfig

// .tensorflow.RewriterConfig.Toggle layout_optimizer = 1;
// .tensorflow.RewriterConfig.Toggle constant_folding = 3;
// .tensorflow.RewriterConfig.Toggle shape_optimization = 13;
// .tensorflow.RewriterConfig.Toggle remapping = 14;
// .tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;
// .tensorflow.RewriterConfig.Toggle dependency_optimization = 8;
// .tensorflow.RewriterConfig.Toggle loop_optimization = 9;
// .tensorflow.RewriterConfig.Toggle function_optimization = 10;
// .tensorflow.RewriterConfig.Toggle debug_stripper = 11;
// bool disable_model_pruning = 2;
// .tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;
// .tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;
// bool disable_meta_optimizer = 19;
// .tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;
// int32 min_graph_nodes = 17;
// .tensorflow.RewriterConfig.MemOptType memory_optimization = 4;
// string memory_optimizer_target_node_name_scope = 6;
// #if LANG_CXX11
// #endif
// .tensorflow.AutoParallelOptions auto_parallel = 5;
// .tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;
// repeated string optimizers = 100;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif
// repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif // __GNUC__

// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)

// namespace tensorflow
// namespace protobuf
// namespace google

// @@protoc_insertion_point(global_scope)

// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto


// Parsed from tensorflow/core/protobuf/config.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto

// #include <limits>
// #include <string>

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include <google/protobuf/io/coded_stream.h>
// #include <google/protobuf/arena.h>
// #include <google/protobuf/arenastring.h>
// #include <google/protobuf/generated_message_table_driven.h>
// #include <google/protobuf/generated_message_util.h>
// #include <google/protobuf/inlined_string_field.h>
// #include <google/protobuf/metadata.h>
// #include <google/protobuf/message.h>
// #include <google/protobuf/repeated_field.h>  // IWYU pragma: export
// #include <google/protobuf/extension_set.h>  // IWYU pragma: export
// #include <google/protobuf/map.h>  // IWYU pragma: export
// #include <google/protobuf/map_entry.h>
// #include <google/protobuf/map_field_inl.h>
// #include <google/protobuf/generated_enum_reflection.h>
// #include <google/protobuf/unknown_field_set.h>
// #include "tensorflow/core/framework/cost_graph.pb.h"
// #include "tensorflow/core/framework/graph.pb.h"
// #include "tensorflow/core/framework/step_stats.pb.h"
// #include "tensorflow/core/protobuf/debug.pb.h"
// #include "tensorflow/core/protobuf/cluster.pb.h"
// #include "tensorflow/core/protobuf/rewriter_config.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto

// Internal implementation detail -- do not use these members.

// namespace protobuf_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto

@Namespace("tensorflow") @Opaque public static class CallableOptions_FeedDevicesEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public CallableOptions_FeedDevicesEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CallableOptions_FeedDevicesEntry_DoNotUse(Pointer p) { super(p); }
}

@Namespace("tensorflow") @Opaque public static class CallableOptions_FetchDevicesEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public CallableOptions_FetchDevicesEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CallableOptions_FetchDevicesEntry_DoNotUse(Pointer p) { super(p); }
}

@Namespace("tensorflow") @Opaque public static class ConfigProto_DeviceCountEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ConfigProto_DeviceCountEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConfigProto_DeviceCountEntry_DoNotUse(Pointer p) { super(p); }
}

// namespace tensorflow
// namespace protobuf
// namespace google

/** enum tensorflow::OptimizerOptions_Level */
public static final int
    OptimizerOptions_Level_L1 = 0,
    OptimizerOptions_Level_L0 = -1,
    OptimizerOptions_Level_OptimizerOptions_Level_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
    OptimizerOptions_Level_OptimizerOptions_Level_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions_Level") int OptimizerOptions_Level_Level_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions_Level") int OptimizerOptions_Level_Level_MAX();
@Namespace("tensorflow") @MemberGetter public static native int OptimizerOptions_Level_Level_ARRAYSIZE();

@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer OptimizerOptions_Level_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer OptimizerOptions_Level_Name(@Cast("tensorflow::OptimizerOptions_Level") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
    @StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_Level*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
    @StdString String name, @Cast("tensorflow::OptimizerOptions_Level*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
    @StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_Level*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
    @StdString String name, @Cast("tensorflow::OptimizerOptions_Level*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
    @StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_Level*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
    @StdString String name, @Cast("tensorflow::OptimizerOptions_Level*") int... value);
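// Usage sketch (illustrative only, not generated code; the semantics noted
// here paraphrase the config.proto documentation): L1 enables common
// subexpression elimination and constant folding, L0 disables them.
//
//   boolean valid = OptimizerOptions_Level_IsValid(OptimizerOptions_Level_L0);      // true
//   BytePointer levelName = OptimizerOptions_Level_Name(OptimizerOptions_Level_L1); // "L1"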
/** enum tensorflow::OptimizerOptions_GlobalJitLevel */
public static final int
    OptimizerOptions_GlobalJitLevel_DEFAULT = 0,
    OptimizerOptions_GlobalJitLevel_OFF = -1,
    OptimizerOptions_GlobalJitLevel_ON_1 = 1,
    OptimizerOptions_GlobalJitLevel_ON_2 = 2,
    OptimizerOptions_GlobalJitLevel_OptimizerOptions_GlobalJitLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
    OptimizerOptions_GlobalJitLevel_OptimizerOptions_GlobalJitLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions_GlobalJitLevel") int OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions_GlobalJitLevel") int OptimizerOptions_GlobalJitLevel_GlobalJitLevel_MAX();
@Namespace("tensorflow") @MemberGetter public static native int OptimizerOptions_GlobalJitLevel_GlobalJitLevel_ARRAYSIZE();

@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer OptimizerOptions_GlobalJitLevel_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer OptimizerOptions_GlobalJitLevel_Name(@Cast("tensorflow::OptimizerOptions_GlobalJitLevel") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_Parse(
    @StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_GlobalJitLevel*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_Parse(
    @StdString String name, @Cast("tensorflow::OptimizerOptions_GlobalJitLevel*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_Parse(
    @StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_GlobalJitLevel*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_Parse(
    @StdString String name, @Cast("tensorflow::OptimizerOptions_GlobalJitLevel*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_Parse(
    @StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_GlobalJitLevel*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_GlobalJitLevel_Parse(
    @StdString String name, @Cast("tensorflow::OptimizerOptions_GlobalJitLevel*") int... value);

/** enum tensorflow::RunOptions_TraceLevel */
public static final int
    RunOptions_TraceLevel_NO_TRACE = 0,
    RunOptions_TraceLevel_SOFTWARE_TRACE = 1,
    RunOptions_TraceLevel_HARDWARE_TRACE = 2,
    RunOptions_TraceLevel_FULL_TRACE = 3,
    RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
    RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RunOptions_TraceLevel") int RunOptions_TraceLevel_TraceLevel_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RunOptions_TraceLevel") int RunOptions_TraceLevel_TraceLevel_MAX();
@Namespace("tensorflow") @MemberGetter public static native int RunOptions_TraceLevel_TraceLevel_ARRAYSIZE();

@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer RunOptions_TraceLevel_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer RunOptions_TraceLevel_Name(@Cast("tensorflow::RunOptions_TraceLevel") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RunOptions_TraceLevel*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
    @StdString String name, @Cast("tensorflow::RunOptions_TraceLevel*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RunOptions_TraceLevel*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
    @StdString String name, @Cast("tensorflow::RunOptions_TraceLevel*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
    @StdString BytePointer name, @Cast("tensorflow::RunOptions_TraceLevel*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
    @StdString String name, @Cast("tensorflow::RunOptions_TraceLevel*") int... value);
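// Usage sketch (illustrative only, not generated code): trace levels are
// ordered NO_TRACE < SOFTWARE_TRACE < HARDWARE_TRACE < FULL_TRACE, and the
// helpers round-trip names in the usual way.
//
//   int[] level = new int[1];
//   if (RunOptions_TraceLevel_Parse("FULL_TRACE", level)) {
//     // level[0] == RunOptions_TraceLevel_FULL_TRACE
//   }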
 */ public GPUOptions_Experimental_VirtualDevices(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public GPUOptions_Experimental_VirtualDevices position(long position) { return (GPUOptions_Experimental_VirtualDevices)super.position(position); }

    public GPUOptions_Experimental_VirtualDevices() { super((Pointer)null); allocate(); }
    private native void allocate();
    public GPUOptions_Experimental_VirtualDevices(@Const @ByRef GPUOptions_Experimental_VirtualDevices from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef GPUOptions_Experimental_VirtualDevices from);

    public native @ByRef @Name("operator =") GPUOptions_Experimental_VirtualDevices put(@Const @ByRef GPUOptions_Experimental_VirtualDevices from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef GPUOptions_Experimental_VirtualDevices default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const GPUOptions_Experimental_VirtualDevices internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(GPUOptions_Experimental_VirtualDevices other);
    public native void Swap(GPUOptions_Experimental_VirtualDevices other);

    // implements Message ----------------------------------------------

    public native GPUOptions_Experimental_VirtualDevices New();
    public native GPUOptions_Experimental_VirtualDevices New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef GPUOptions_Experimental_VirtualDevices from);
    public native void MergeFrom(@Const @ByRef GPUOptions_Experimental_VirtualDevices from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated float memory_limit_mb = 1;
    public native int memory_limit_mb_size();
    public native void clear_memory_limit_mb();
    @MemberGetter public static native int kMemoryLimitMbFieldNumber();
    public static final int kMemoryLimitMbFieldNumber = kMemoryLimitMbFieldNumber();
    public native float memory_limit_mb(int index);
    public native void set_memory_limit_mb(int index, float value);
    public native void add_memory_limit_mb(float value);
}
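// Illustrative usage sketch, not part of the generated bindings: the helper name is
// ours and only accessors declared in this file are used. VirtualDevices splits one
// physical GPU into several virtual devices, one memory_limit_mb entry per device.
private static GPUOptions_Experimental_VirtualDevices exampleVirtualDevices() {
    GPUOptions_Experimental_VirtualDevices v = new GPUOptions_Experimental_VirtualDevices();
    v.add_memory_limit_mb(2048f); // first virtual device: 2 GB
    v.add_memory_limit_mb(4096f); // second virtual device: 4 GB
    return v;
}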
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class GPUOptions_Experimental extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GPUOptions_Experimental(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public GPUOptions_Experimental(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public GPUOptions_Experimental position(long position) { return (GPUOptions_Experimental)super.position(position); }

    public GPUOptions_Experimental() { super((Pointer)null); allocate(); }
    private native void allocate();
    public GPUOptions_Experimental(@Const @ByRef GPUOptions_Experimental from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef GPUOptions_Experimental from);

    public native @ByRef @Name("operator =") GPUOptions_Experimental put(@Const @ByRef GPUOptions_Experimental from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef GPUOptions_Experimental default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const GPUOptions_Experimental internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(GPUOptions_Experimental other);
    public native void Swap(GPUOptions_Experimental other);

    // implements Message ----------------------------------------------

    public native GPUOptions_Experimental New();
    public native GPUOptions_Experimental New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef GPUOptions_Experimental from);
    public native void MergeFrom(@Const @ByRef GPUOptions_Experimental from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
    public native int virtual_devices_size();
    public native void clear_virtual_devices();
    @MemberGetter public static native int kVirtualDevicesFieldNumber();
    public static final int kVirtualDevicesFieldNumber = kVirtualDevicesFieldNumber();
    public native GPUOptions_Experimental_VirtualDevices mutable_virtual_devices(int index);
    public native @Const @ByRef GPUOptions_Experimental_VirtualDevices virtual_devices(int index);
    public native GPUOptions_Experimental_VirtualDevices add_virtual_devices();

    // bool use_unified_memory = 2;
    public native void clear_use_unified_memory();
    @MemberGetter public static native int kUseUnifiedMemoryFieldNumber();
    public static final int kUseUnifiedMemoryFieldNumber = kUseUnifiedMemoryFieldNumber();
    public native @Cast("bool") boolean use_unified_memory();
    public native void set_use_unified_memory(@Cast("bool") boolean value);

    // int32 num_dev_to_dev_copy_streams = 3;
    public native void clear_num_dev_to_dev_copy_streams();
    @MemberGetter public static native int kNumDevToDevCopyStreamsFieldNumber();
    public static final int kNumDevToDevCopyStreamsFieldNumber = kNumDevToDevCopyStreamsFieldNumber();
    public native @Cast("google::protobuf::int32") int num_dev_to_dev_copy_streams();
    public native void set_num_dev_to_dev_copy_streams(@Cast("google::protobuf::int32") int value);
}
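// Illustrative usage sketch, not part of the generated bindings; the helper name is
// ours and only accessors declared above are used. Configures the experimental GPU
// options: unified memory, extra device-to-device copy streams, and a virtual device.
private static GPUOptions_Experimental exampleGpuExperimental() {
    GPUOptions_Experimental exp = new GPUOptions_Experimental();
    exp.set_use_unified_memory(true);                     // allow CUDA unified memory
    exp.set_num_dev_to_dev_copy_streams(2);               // two dev-to-dev copy streams
    exp.add_virtual_devices().add_memory_limit_mb(2048f); // one 2 GB virtual device
    return exp;
}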
native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string allocator_type = 2; public native void clear_allocator_type(); @MemberGetter public static native int kAllocatorTypeFieldNumber(); public static final int kAllocatorTypeFieldNumber = kAllocatorTypeFieldNumber(); public native @StdString BytePointer allocator_type(); public native void set_allocator_type(@StdString BytePointer value); public native void set_allocator_type(@StdString String value); // #if LANG_CXX11 // #endif public native void set_allocator_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_allocator_type(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_allocator_type(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_allocator_type(); public native void set_allocated_allocator_type(@StdString @Cast({"char*", "std::string*"}) BytePointer allocator_type); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_allocator_type(); public native @Deprecated void unsafe_arena_set_allocated_allocator_type( @StdString @Cast({"char*", "std::string*"}) BytePointer allocator_type); // string visible_device_list = 5; public native void clear_visible_device_list(); @MemberGetter public static native int kVisibleDeviceListFieldNumber(); public static final int kVisibleDeviceListFieldNumber = kVisibleDeviceListFieldNumber(); public native @StdString BytePointer visible_device_list(); public native void set_visible_device_list(@StdString BytePointer value); public native void set_visible_device_list(@StdString String value); // #if LANG_CXX11 // #endif public native void set_visible_device_list(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_visible_device_list(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_visible_device_list(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_visible_device_list(); public native void set_allocated_visible_device_list(@StdString @Cast({"char*", "std::string*"}) BytePointer visible_device_list); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_visible_device_list(); public native @Deprecated void unsafe_arena_set_allocated_visible_device_list( @StdString @Cast({"char*", "std::string*"}) BytePointer visible_device_list); // .tensorflow.GPUOptions.Experimental experimental = 9; public native @Cast("bool") 
boolean has_experimental(); public native void clear_experimental(); @MemberGetter public static native int kExperimentalFieldNumber(); public static final int kExperimentalFieldNumber = kExperimentalFieldNumber(); public native @Const @ByRef GPUOptions_Experimental experimental(); public native GPUOptions_Experimental release_experimental(); public native GPUOptions_Experimental mutable_experimental(); public native void set_allocated_experimental(GPUOptions_Experimental experimental); public native void unsafe_arena_set_allocated_experimental( GPUOptions_Experimental experimental); public native GPUOptions_Experimental unsafe_arena_release_experimental(); // double per_process_gpu_memory_fraction = 1; public native void clear_per_process_gpu_memory_fraction(); @MemberGetter public static native int kPerProcessGpuMemoryFractionFieldNumber(); public static final int kPerProcessGpuMemoryFractionFieldNumber = kPerProcessGpuMemoryFractionFieldNumber(); public native double per_process_gpu_memory_fraction(); public native void set_per_process_gpu_memory_fraction(double value); // int64 deferred_deletion_bytes = 3; public native void clear_deferred_deletion_bytes(); @MemberGetter public static native int kDeferredDeletionBytesFieldNumber(); public static final int kDeferredDeletionBytesFieldNumber = kDeferredDeletionBytesFieldNumber(); public native @Cast("google::protobuf::int64") long deferred_deletion_bytes(); public native void set_deferred_deletion_bytes(@Cast("google::protobuf::int64") long value); // int32 polling_active_delay_usecs = 6; public native void clear_polling_active_delay_usecs(); @MemberGetter public static native int kPollingActiveDelayUsecsFieldNumber(); public static final int kPollingActiveDelayUsecsFieldNumber = kPollingActiveDelayUsecsFieldNumber(); public native @Cast("google::protobuf::int32") int polling_active_delay_usecs(); public native void set_polling_active_delay_usecs(@Cast("google::protobuf::int32") int value); // bool allow_growth = 4; public native void clear_allow_growth(); @MemberGetter public static native int kAllowGrowthFieldNumber(); public static final int kAllowGrowthFieldNumber = kAllowGrowthFieldNumber(); public native @Cast("bool") boolean allow_growth(); public native void set_allow_growth(@Cast("bool") boolean value); // bool force_gpu_compatible = 8; public native void clear_force_gpu_compatible(); @MemberGetter public static native int kForceGpuCompatibleFieldNumber(); public static final int kForceGpuCompatibleFieldNumber = kForceGpuCompatibleFieldNumber(); public native @Cast("bool") boolean force_gpu_compatible(); public native void set_force_gpu_compatible(@Cast("bool") boolean value); // int32 polling_inactive_delay_msecs = 7; public native void clear_polling_inactive_delay_msecs(); @MemberGetter public static native int kPollingInactiveDelayMsecsFieldNumber(); public static final int kPollingInactiveDelayMsecsFieldNumber = kPollingInactiveDelayMsecsFieldNumber(); public native @Cast("google::protobuf::int32") int polling_inactive_delay_msecs(); public native void set_polling_inactive_delay_msecs(@Cast("google::protobuf::int32") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class OptimizerOptions extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OptimizerOptions(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
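// Illustrative usage sketch, not part of the generated bindings; the helper name is
// ours. A typical GPUOptions setup: grow allocations on demand rather than reserving
// the whole device up front, cap the per-process fraction, and expose only GPU 0.
private static GPUOptions exampleGpuOptions() {
    GPUOptions gpu = new GPUOptions();
    gpu.set_allow_growth(true);                   // allocate memory incrementally
    gpu.set_per_process_gpu_memory_fraction(0.5); // use at most half of device memory
    gpu.set_visible_device_list("0");             // only GPU 0 is visible to the process
    gpu.mutable_experimental().set_use_unified_memory(true);
    return gpu;
}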
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class OptimizerOptions extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OptimizerOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OptimizerOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OptimizerOptions position(long position) { return (OptimizerOptions)super.position(position); }

    public OptimizerOptions() { super((Pointer)null); allocate(); }
    private native void allocate();
    public OptimizerOptions(@Const @ByRef OptimizerOptions from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef OptimizerOptions from);

    public native @ByRef @Name("operator =") OptimizerOptions put(@Const @ByRef OptimizerOptions from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef OptimizerOptions default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const OptimizerOptions internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(OptimizerOptions other);
    public native void Swap(OptimizerOptions other);

    // implements Message ----------------------------------------------

    public native OptimizerOptions New();
    public native OptimizerOptions New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef OptimizerOptions from);
    public native void MergeFrom(@Const @ByRef OptimizerOptions from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int L1();
    public static final int L1 = L1();
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int L0();
    public static final int L0 = L0();
    public static native @Cast("bool") boolean Level_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int Level_MIN();
    public static final int Level_MIN = Level_MIN();
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int Level_MAX();
    public static final int Level_MAX = Level_MAX();
    @MemberGetter public static native int Level_ARRAYSIZE();
    public static final int Level_ARRAYSIZE = Level_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer Level_descriptor();
    public static native @StdString BytePointer Level_Name(@Cast("tensorflow::OptimizerOptions::Level") int value);
    public static native @Cast("bool") boolean Level_Parse(@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions::Level*") IntPointer value);
    public static native @Cast("bool") boolean Level_Parse(@StdString String name, @Cast("tensorflow::OptimizerOptions::Level*") IntBuffer value);
    public static native @Cast("bool") boolean Level_Parse(@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions::Level*") int... value);
    public static native @Cast("bool") boolean Level_Parse(@StdString String name, @Cast("tensorflow::OptimizerOptions::Level*") IntPointer value);
    public static native @Cast("bool") boolean Level_Parse(@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions::Level*") IntBuffer value);
    public static native @Cast("bool") boolean Level_Parse(@StdString String name, @Cast("tensorflow::OptimizerOptions::Level*") int... value);

    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::GlobalJitLevel") int DEFAULT();
    public static final int DEFAULT = DEFAULT();
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::GlobalJitLevel") int OFF();
    public static final int OFF = OFF();
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::GlobalJitLevel") int ON_1();
    public static final int ON_1 = ON_1();
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::GlobalJitLevel") int ON_2();
    public static final int ON_2 = ON_2();
    public static native @Cast("bool") boolean GlobalJitLevel_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::GlobalJitLevel") int GlobalJitLevel_MIN();
    public static final int GlobalJitLevel_MIN = GlobalJitLevel_MIN();
    @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::GlobalJitLevel") int GlobalJitLevel_MAX();
    public static final int GlobalJitLevel_MAX = GlobalJitLevel_MAX();
    @MemberGetter public static native int GlobalJitLevel_ARRAYSIZE();
    public static final int GlobalJitLevel_ARRAYSIZE = GlobalJitLevel_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer GlobalJitLevel_descriptor();
    public static native @StdString BytePointer GlobalJitLevel_Name(@Cast("tensorflow::OptimizerOptions::GlobalJitLevel") int value);
    public static native @Cast("bool") boolean GlobalJitLevel_Parse(@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions::GlobalJitLevel*") IntPointer value);
    public static native @Cast("bool") boolean GlobalJitLevel_Parse(@StdString String name, @Cast("tensorflow::OptimizerOptions::GlobalJitLevel*") IntBuffer value);
    public static native @Cast("bool") boolean GlobalJitLevel_Parse(@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions::GlobalJitLevel*") int... value);
    public static native @Cast("bool") boolean GlobalJitLevel_Parse(@StdString String name, @Cast("tensorflow::OptimizerOptions::GlobalJitLevel*") IntPointer value);
    public static native @Cast("bool") boolean GlobalJitLevel_Parse(@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions::GlobalJitLevel*") IntBuffer value);
    public static native @Cast("bool") boolean GlobalJitLevel_Parse(@StdString String name, @Cast("tensorflow::OptimizerOptions::GlobalJitLevel*") int... value);

    // accessors -------------------------------------------------------

    // bool do_common_subexpression_elimination = 1;
    public native void clear_do_common_subexpression_elimination();
    @MemberGetter public static native int kDoCommonSubexpressionEliminationFieldNumber();
    public static final int kDoCommonSubexpressionEliminationFieldNumber = kDoCommonSubexpressionEliminationFieldNumber();
    public native @Cast("bool") boolean do_common_subexpression_elimination();
    public native void set_do_common_subexpression_elimination(@Cast("bool") boolean value);

    // bool do_constant_folding = 2;
    public native void clear_do_constant_folding();
    @MemberGetter public static native int kDoConstantFoldingFieldNumber();
    public static final int kDoConstantFoldingFieldNumber = kDoConstantFoldingFieldNumber();
    public native @Cast("bool") boolean do_constant_folding();
    public native void set_do_constant_folding(@Cast("bool") boolean value);

    // bool do_function_inlining = 4;
    public native void clear_do_function_inlining();
    @MemberGetter public static native int kDoFunctionInliningFieldNumber();
    public static final int kDoFunctionInliningFieldNumber = kDoFunctionInliningFieldNumber();
    public native @Cast("bool") boolean do_function_inlining();
    public native void set_do_function_inlining(@Cast("bool") boolean value);

    // .tensorflow.OptimizerOptions.Level opt_level = 3;
    public native void clear_opt_level();
    @MemberGetter public static native int kOptLevelFieldNumber();
    public static final int kOptLevelFieldNumber = kOptLevelFieldNumber();
    public native @Cast("tensorflow::OptimizerOptions_Level") int opt_level();
    public native void set_opt_level(@Cast("tensorflow::OptimizerOptions_Level") int value);

    // int64 max_folded_constant_in_bytes = 6;
    public native void clear_max_folded_constant_in_bytes();
    @MemberGetter public static native int kMaxFoldedConstantInBytesFieldNumber();
    public static final int kMaxFoldedConstantInBytesFieldNumber = kMaxFoldedConstantInBytesFieldNumber();
    public native @Cast("google::protobuf::int64") long max_folded_constant_in_bytes();
    public native void set_max_folded_constant_in_bytes(@Cast("google::protobuf::int64") long value);

    // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
    public native void clear_global_jit_level();
    @MemberGetter public static native int kGlobalJitLevelFieldNumber();
    public static final int kGlobalJitLevelFieldNumber = kGlobalJitLevelFieldNumber();
    public native @Cast("tensorflow::OptimizerOptions_GlobalJitLevel") int global_jit_level();
    public native void set_global_jit_level(@Cast("tensorflow::OptimizerOptions_GlobalJitLevel") int value);
}
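// Illustrative usage sketch, not part of the generated bindings; the helper name is
// ours. The Level and GlobalJitLevel enum values are exposed above as plain ints
// (L0, L1, DEFAULT, OFF, ON_1, ON_2), so the setters below take int.
private static OptimizerOptions exampleOptimizerOptions() {
    OptimizerOptions opt = new OptimizerOptions();
    opt.set_opt_level(OptimizerOptions.L1);          // graph-level optimizations on
    opt.set_global_jit_level(OptimizerOptions.ON_1); // JIT compilation at its first level
    opt.set_do_function_inlining(true);
    return opt;
}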
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class GraphOptions extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GraphOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public GraphOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public GraphOptions position(long position) { return (GraphOptions)super.position(position); }

    public GraphOptions() { super((Pointer)null); allocate(); }
    private native void allocate();
    public GraphOptions(@Const @ByRef GraphOptions from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef GraphOptions from);

    public native @ByRef @Name("operator =") GraphOptions put(@Const @ByRef GraphOptions from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef GraphOptions default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const GraphOptions internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(GraphOptions other);
    public native void Swap(GraphOptions other);

    // implements Message ----------------------------------------------

    public native GraphOptions New();
    public native GraphOptions New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef GraphOptions from);
    public native void MergeFrom(@Const @ByRef GraphOptions from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // .tensorflow.OptimizerOptions optimizer_options = 3;
    public native @Cast("bool") boolean has_optimizer_options();
    public native void clear_optimizer_options();
    @MemberGetter public static native int kOptimizerOptionsFieldNumber();
    public static final int kOptimizerOptionsFieldNumber = kOptimizerOptionsFieldNumber();
    public native @Const @ByRef OptimizerOptions optimizer_options();
    public native OptimizerOptions release_optimizer_options();
    public native OptimizerOptions mutable_optimizer_options();
    public native void set_allocated_optimizer_options(OptimizerOptions optimizer_options);
    public native void unsafe_arena_set_allocated_optimizer_options(OptimizerOptions optimizer_options);
    public native OptimizerOptions unsafe_arena_release_optimizer_options();

    // .tensorflow.RewriterConfig rewrite_options = 10;
    public native @Cast("bool") boolean has_rewrite_options();
    public native void clear_rewrite_options();
    @MemberGetter public static native int kRewriteOptionsFieldNumber();
    public static final int kRewriteOptionsFieldNumber = kRewriteOptionsFieldNumber();
    public native @Const @ByRef RewriterConfig rewrite_options();
    public native RewriterConfig release_rewrite_options();
    public native RewriterConfig mutable_rewrite_options();
    public native void set_allocated_rewrite_options(RewriterConfig rewrite_options);
    public native void unsafe_arena_set_allocated_rewrite_options(RewriterConfig rewrite_options);
    public native RewriterConfig unsafe_arena_release_rewrite_options();

    // int64 build_cost_model = 4;
    public native void clear_build_cost_model();
    @MemberGetter public static native int kBuildCostModelFieldNumber();
    public static final int kBuildCostModelFieldNumber = kBuildCostModelFieldNumber();
    public native @Cast("google::protobuf::int64") long build_cost_model();
    public native void set_build_cost_model(@Cast("google::protobuf::int64") long value);

    // bool enable_recv_scheduling = 2;
    public native void clear_enable_recv_scheduling();
    @MemberGetter public static native int kEnableRecvSchedulingFieldNumber();
    public static final int kEnableRecvSchedulingFieldNumber = kEnableRecvSchedulingFieldNumber();
    public native @Cast("bool") boolean enable_recv_scheduling();
    public native void set_enable_recv_scheduling(@Cast("bool") boolean value);

    // bool infer_shapes = 5;
    public native void clear_infer_shapes();
    @MemberGetter public static native int kInferShapesFieldNumber();
    public static final int kInferShapesFieldNumber = kInferShapesFieldNumber();
    public native @Cast("bool") boolean infer_shapes();
    public native void set_infer_shapes(@Cast("bool") boolean value);

    // bool place_pruned_graph = 6;
    public native void clear_place_pruned_graph();
    @MemberGetter public static native int kPlacePrunedGraphFieldNumber();
    public static final int kPlacePrunedGraphFieldNumber = kPlacePrunedGraphFieldNumber();
    public native @Cast("bool") boolean place_pruned_graph();
    public native void set_place_pruned_graph(@Cast("bool") boolean value);

    // bool enable_bfloat16_sendrecv = 7;
    public native void clear_enable_bfloat16_sendrecv();
    @MemberGetter public static native int kEnableBfloat16SendrecvFieldNumber();
    public static final int kEnableBfloat16SendrecvFieldNumber = kEnableBfloat16SendrecvFieldNumber();
    public native @Cast("bool") boolean enable_bfloat16_sendrecv();
    public native void set_enable_bfloat16_sendrecv(@Cast("bool") boolean value);

    // int32 timeline_step = 8;
    public native void clear_timeline_step();
    @MemberGetter public static native int kTimelineStepFieldNumber();
    public static final int kTimelineStepFieldNumber = kTimelineStepFieldNumber();
    public native @Cast("google::protobuf::int32") int timeline_step();
    public native void set_timeline_step(@Cast("google::protobuf::int32") int value);

    // int64 build_cost_model_after = 9;
    public native void clear_build_cost_model_after();
    @MemberGetter public static native int kBuildCostModelAfterFieldNumber();
    public static final int kBuildCostModelAfterFieldNumber = kBuildCostModelAfterFieldNumber();
    public native @Cast("google::protobuf::int64") long build_cost_model_after();
    public native void set_build_cost_model_after(@Cast("google::protobuf::int64") long value);
}
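// Illustrative usage sketch, not part of the generated bindings; the helper name is
// ours. GraphOptions nests an OptimizerOptions message; mutable_optimizer_options()
// returns the nested message so it can be edited in place.
private static GraphOptions exampleGraphOptions() {
    GraphOptions graph = new GraphOptions();
    graph.set_infer_shapes(true); // annotate nodes with inferred output shapes
    graph.mutable_optimizer_options().set_opt_level(OptimizerOptions.L1);
    return graph;
}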
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class ThreadPoolOptionProto extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ThreadPoolOptionProto(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ThreadPoolOptionProto(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ThreadPoolOptionProto position(long position) { return (ThreadPoolOptionProto)super.position(position); }

    public ThreadPoolOptionProto() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ThreadPoolOptionProto(@Const @ByRef ThreadPoolOptionProto from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ThreadPoolOptionProto from);

    public native @ByRef @Name("operator =") ThreadPoolOptionProto put(@Const @ByRef ThreadPoolOptionProto from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ThreadPoolOptionProto default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ThreadPoolOptionProto internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ThreadPoolOptionProto other);
    public native void Swap(ThreadPoolOptionProto other);

    // implements Message ----------------------------------------------

    public native ThreadPoolOptionProto New();
    public native ThreadPoolOptionProto New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ThreadPoolOptionProto from);
    public native void MergeFrom(@Const @ByRef ThreadPoolOptionProto from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string global_name = 2;
    public native void clear_global_name();
    @MemberGetter public static native int kGlobalNameFieldNumber();
    public static final int kGlobalNameFieldNumber = kGlobalNameFieldNumber();
    public native @StdString BytePointer global_name();
    public native void set_global_name(@StdString BytePointer value);
    public native void set_global_name(@StdString String value);
// #if LANG_CXX11
// #endif
    public native void set_global_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_global_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_global_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_global_name();
    public native void set_allocated_global_name(@StdString @Cast({"char*", "std::string*"}) BytePointer global_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_global_name();
    public native @Deprecated void unsafe_arena_set_allocated_global_name(@StdString @Cast({"char*", "std::string*"}) BytePointer global_name);

    // int32 num_threads = 1;
    public native void clear_num_threads();
    @MemberGetter public static native int kNumThreadsFieldNumber();
    public static final int kNumThreadsFieldNumber = kNumThreadsFieldNumber();
    public native @Cast("google::protobuf::int32") int num_threads();
    public native void set_num_threads(@Cast("google::protobuf::int32") int value);
}
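// Illustrative usage sketch, not part of the generated bindings; the helper name and
// pool name are ours. Per the proto contract, a non-empty global_name requests a
// process-wide thread pool shared by sessions that use the same name.
private static ThreadPoolOptionProto exampleThreadPool() {
    ThreadPoolOptionProto pool = new ThreadPoolOptionProto();
    pool.set_num_threads(4);              // 0 would let the runtime pick a default
    pool.set_global_name("shared_pool");  // illustrative name
    return pool;
}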
@Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // bool use_rpc_for_inprocess_master = 1; public native void clear_use_rpc_for_inprocess_master(); @MemberGetter public static native int kUseRpcForInprocessMasterFieldNumber(); public static final int kUseRpcForInprocessMasterFieldNumber = kUseRpcForInprocessMasterFieldNumber(); public native @Cast("bool") boolean use_rpc_for_inprocess_master(); public native void set_use_rpc_for_inprocess_master(@Cast("bool") boolean value); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class ConfigProto_Experimental extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConfigProto_Experimental(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public ConfigProto_Experimental(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public ConfigProto_Experimental position(long position) { return (ConfigProto_Experimental)super.position(position); } public ConfigProto_Experimental() { super((Pointer)null); allocate(); } private native void allocate(); public ConfigProto_Experimental(@Const @ByRef ConfigProto_Experimental from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef ConfigProto_Experimental from); public native @ByRef @Name("operator =") ConfigProto_Experimental put(@Const @ByRef ConfigProto_Experimental from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef ConfigProto_Experimental default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const ConfigProto_Experimental internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(ConfigProto_Experimental other); public native void Swap(ConfigProto_Experimental other); // implements Message ---------------------------------------------- public native ConfigProto_Experimental New(); public native ConfigProto_Experimental New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef ConfigProto_Experimental from); public native void 
// -------------------------------------------------------------------

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class ConfigProto_Experimental extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConfigProto_Experimental(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ConfigProto_Experimental(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ConfigProto_Experimental position(long position) { return (ConfigProto_Experimental)super.position(position); }

    public ConfigProto_Experimental() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ConfigProto_Experimental(@Const @ByRef ConfigProto_Experimental from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ConfigProto_Experimental from);

    public native @ByRef @Name("operator =") ConfigProto_Experimental put(@Const @ByRef ConfigProto_Experimental from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ConfigProto_Experimental default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ConfigProto_Experimental internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ConfigProto_Experimental other);
    public native void Swap(ConfigProto_Experimental other);

    // implements Message ----------------------------------------------

    public native ConfigProto_Experimental New();
    public native ConfigProto_Experimental New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ConfigProto_Experimental from);
    public native void MergeFrom(@Const @ByRef ConfigProto_Experimental from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string collective_group_leader = 1;
    public native void clear_collective_group_leader();
    @MemberGetter public static native int kCollectiveGroupLeaderFieldNumber();
    public static final int kCollectiveGroupLeaderFieldNumber = kCollectiveGroupLeaderFieldNumber();
    public native @StdString BytePointer collective_group_leader();
    public native void set_collective_group_leader(@StdString BytePointer value);
    public native void set_collective_group_leader(@StdString String value);
// #if LANG_CXX11
// #endif
    public native void set_collective_group_leader(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_collective_group_leader(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_collective_group_leader();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_collective_group_leader();
    public native void set_allocated_collective_group_leader(@StdString @Cast({"char*", "std::string*"}) BytePointer collective_group_leader);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_collective_group_leader();
    public native @Deprecated void unsafe_arena_set_allocated_collective_group_leader(@StdString @Cast({"char*", "std::string*"}) BytePointer collective_group_leader);

    // string executor_type = 3;
    public native void clear_executor_type();
    @MemberGetter public static native int kExecutorTypeFieldNumber();
    public static final int kExecutorTypeFieldNumber = kExecutorTypeFieldNumber();
    public native @StdString BytePointer executor_type();
    public native void set_executor_type(@StdString BytePointer value);
    public native void set_executor_type(@StdString String value);
// #if LANG_CXX11
// #endif
    public native void set_executor_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_executor_type(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_executor_type();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_executor_type();
    public native void set_allocated_executor_type(@StdString @Cast({"char*", "std::string*"}) BytePointer executor_type);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_executor_type();
    public native @Deprecated void unsafe_arena_set_allocated_executor_type(@StdString @Cast({"char*", "std::string*"}) BytePointer executor_type);
}
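// Illustrative usage sketch, not part of the generated bindings; the helper name and
// the task string are ours (the string follows TensorFlow's usual job/replica/task
// naming, but any value accepted by the runtime works).
private static ConfigProto_Experimental exampleConfigExperimental() {
    ConfigProto_Experimental exp = new ConfigProto_Experimental();
    exp.set_collective_group_leader("/job:worker/replica:0/task:0");
    // set_executor_type(...) could select a non-default executor; left unset here.
    return exp;
}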
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class ConfigProto extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ConfigProto(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ConfigProto(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ConfigProto position(long position) { return (ConfigProto)super.position(position); }

    public ConfigProto() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ConfigProto(@Const @ByRef ConfigProto from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ConfigProto from);

    public native @ByRef @Name("operator =") ConfigProto put(@Const @ByRef ConfigProto from);
// #if LANG_CXX11
// #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ConfigProto default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ConfigProto internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ConfigProto other);
    public native void Swap(ConfigProto other);

    // implements Message ----------------------------------------------

    public native ConfigProto New();
    public native ConfigProto New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ConfigProto from);
    public native void MergeFrom(@Const @ByRef ConfigProto from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // map<string, int32> device_count = 1;
    public native int device_count_size();
    public native void clear_device_count();
    @MemberGetter public static native int kDeviceCountFieldNumber();
    public static final int kDeviceCountFieldNumber = kDeviceCountFieldNumber();
    public native @Const @ByRef StringIntMap device_count();
    public native StringIntMap mutable_device_count();

    // repeated string device_filters = 4;
    public native int device_filters_size();
    public native void clear_device_filters();
    @MemberGetter public static native int kDeviceFiltersFieldNumber();
    public static final int kDeviceFiltersFieldNumber = kDeviceFiltersFieldNumber();
    public native @StdString BytePointer device_filters(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device_filters(int index);
    public native void set_device_filters(int index, @StdString BytePointer value);
    public native void set_device_filters(int index, @StdString String value);
// #if LANG_CXX11
// #endif
    public native void set_device_filters(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_device_filters(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_device_filters();
    public native void add_device_filters(@StdString BytePointer value);
    public native void add_device_filters(@StdString String value);
// #if LANG_CXX11
// #endif
    public native void add_device_filters(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_device_filters(String value, @Cast("size_t") long size);

    // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
    public native int session_inter_op_thread_pool_size();
    public native void clear_session_inter_op_thread_pool();
    @MemberGetter public static native int kSessionInterOpThreadPoolFieldNumber();
    public static final int kSessionInterOpThreadPoolFieldNumber = kSessionInterOpThreadPoolFieldNumber();
    public native ThreadPoolOptionProto mutable_session_inter_op_thread_pool(int index);
    public native @Const @ByRef ThreadPoolOptionProto session_inter_op_thread_pool(int index);
    public native ThreadPoolOptionProto add_session_inter_op_thread_pool();

    // .tensorflow.GPUOptions gpu_options = 6;
    public native @Cast("bool") boolean has_gpu_options();
    public native void clear_gpu_options();
    @MemberGetter public static native int kGpuOptionsFieldNumber();
    public static final int kGpuOptionsFieldNumber = kGpuOptionsFieldNumber();
    public native @Const @ByRef GPUOptions gpu_options();
    public native GPUOptions release_gpu_options();
    public native GPUOptions mutable_gpu_options();
    public native void set_allocated_gpu_options(GPUOptions gpu_options);
    public native void unsafe_arena_set_allocated_gpu_options(GPUOptions gpu_options);
    public native GPUOptions unsafe_arena_release_gpu_options();

    // .tensorflow.GraphOptions graph_options = 10;
    public native @Cast("bool") boolean has_graph_options();
    public native void clear_graph_options();
    @MemberGetter public static native int kGraphOptionsFieldNumber();
    public static final int kGraphOptionsFieldNumber = kGraphOptionsFieldNumber();
    public native @Const @ByRef GraphOptions graph_options();
    public native GraphOptions release_graph_options();
    public native GraphOptions mutable_graph_options();
    public native void set_allocated_graph_options(GraphOptions graph_options);
    public native void unsafe_arena_set_allocated_graph_options(GraphOptions graph_options);
    public native GraphOptions unsafe_arena_release_graph_options();

    // .tensorflow.RPCOptions rpc_options = 13;
    public native @Cast("bool") boolean has_rpc_options();
    public native void clear_rpc_options();
    @MemberGetter public static native int kRpcOptionsFieldNumber();
    public static final int kRpcOptionsFieldNumber = kRpcOptionsFieldNumber();
    public native @Const @ByRef RPCOptions rpc_options();
    public native RPCOptions release_rpc_options();
    public native RPCOptions mutable_rpc_options();
    public native void set_allocated_rpc_options(RPCOptions rpc_options);
    public native void unsafe_arena_set_allocated_rpc_options(RPCOptions rpc_options);
    public native RPCOptions unsafe_arena_release_rpc_options();

    // .tensorflow.ClusterDef cluster_def = 14;
    public native @Cast("bool") boolean has_cluster_def();
    public native void clear_cluster_def();
    @MemberGetter public static native int kClusterDefFieldNumber();
    public static final int kClusterDefFieldNumber = kClusterDefFieldNumber();
    public native @Const @ByRef ClusterDef cluster_def();
    public native ClusterDef release_cluster_def();
    public native ClusterDef mutable_cluster_def();
    public native void set_allocated_cluster_def(ClusterDef cluster_def);
    public native void unsafe_arena_set_allocated_cluster_def(ClusterDef cluster_def);
    public native ClusterDef unsafe_arena_release_cluster_def();

    // .tensorflow.ConfigProto.Experimental experimental = 16;
    public native @Cast("bool") boolean has_experimental();
    public native void clear_experimental();
    @MemberGetter public static native int kExperimentalFieldNumber();
    public static final int kExperimentalFieldNumber = kExperimentalFieldNumber();
    public native @Const @ByRef ConfigProto_Experimental experimental();
    public native ConfigProto_Experimental release_experimental();
    public native ConfigProto_Experimental mutable_experimental();
    public native void set_allocated_experimental(ConfigProto_Experimental experimental);
    public native void unsafe_arena_set_allocated_experimental(ConfigProto_Experimental experimental);
    public native ConfigProto_Experimental unsafe_arena_release_experimental();

    // int32 intra_op_parallelism_threads = 2;
    public native void clear_intra_op_parallelism_threads();
    @MemberGetter public static native int kIntraOpParallelismThreadsFieldNumber();
    public static final int kIntraOpParallelismThreadsFieldNumber = kIntraOpParallelismThreadsFieldNumber();
    public native @Cast("google::protobuf::int32") int intra_op_parallelism_threads();
    public native void set_intra_op_parallelism_threads(@Cast("google::protobuf::int32") int value);

    // int32 placement_period = 3;
    public native void clear_placement_period();
    @MemberGetter public static native int kPlacementPeriodFieldNumber();
    public static final int kPlacementPeriodFieldNumber = kPlacementPeriodFieldNumber();
    public native @Cast("google::protobuf::int32") int placement_period();
    public native void set_placement_period(@Cast("google::protobuf::int32") int value);

    // int32 inter_op_parallelism_threads = 5;
    public native void clear_inter_op_parallelism_threads();
    @MemberGetter public static native int kInterOpParallelismThreadsFieldNumber();
    public static final int kInterOpParallelismThreadsFieldNumber = kInterOpParallelismThreadsFieldNumber();
    public native @Cast("google::protobuf::int32") int inter_op_parallelism_threads();
    public native void set_inter_op_parallelism_threads(@Cast("google::protobuf::int32") int value);

    // bool use_per_session_threads = 9;
    public native void clear_use_per_session_threads();
    @MemberGetter public static native int kUsePerSessionThreadsFieldNumber();
    public static final int kUsePerSessionThreadsFieldNumber = kUsePerSessionThreadsFieldNumber();
    public native @Cast("bool") boolean use_per_session_threads();
    public native void set_use_per_session_threads(@Cast("bool") boolean value);

    // bool allow_soft_placement = 7;
    public native void clear_allow_soft_placement();
    @MemberGetter public static native int kAllowSoftPlacementFieldNumber();
    public static final int kAllowSoftPlacementFieldNumber = kAllowSoftPlacementFieldNumber();
    public native @Cast("bool") boolean allow_soft_placement();
    public native void set_allow_soft_placement(@Cast("bool") boolean value);

    // bool log_device_placement = 8;
    public native void clear_log_device_placement();
    @MemberGetter public static native int kLogDevicePlacementFieldNumber();
    public static final int kLogDevicePlacementFieldNumber = kLogDevicePlacementFieldNumber();
    public native @Cast("bool") boolean log_device_placement();
    public native void set_log_device_placement(@Cast("bool") boolean value);

    // bool isolate_session_state = 15;
    public native void clear_isolate_session_state();
    @MemberGetter public static native int kIsolateSessionStateFieldNumber();
    public static final int kIsolateSessionStateFieldNumber = kIsolateSessionStateFieldNumber();
    public native @Cast("bool") boolean isolate_session_state();
    public native void set_isolate_session_state(@Cast("bool") boolean value);

    // int64 operation_timeout_in_ms = 11;
    public native void clear_operation_timeout_in_ms();
    @MemberGetter public static native int kOperationTimeoutInMsFieldNumber();
    public static final int kOperationTimeoutInMsFieldNumber = kOperationTimeoutInMsFieldNumber();
    public native @Cast("google::protobuf::int64") long operation_timeout_in_ms();
    public native void set_operation_timeout_in_ms(@Cast("google::protobuf::int64") long value);
}
*/ public RunOptions_Experimental(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public RunOptions_Experimental position(long position) { return (RunOptions_Experimental)super.position(position); } public RunOptions_Experimental() { super((Pointer)null); allocate(); } private native void allocate(); public RunOptions_Experimental(@Const @ByRef RunOptions_Experimental from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef RunOptions_Experimental from); public native @ByRef @Name("operator =") RunOptions_Experimental put(@Const @ByRef RunOptions_Experimental from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef RunOptions_Experimental default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const RunOptions_Experimental internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(RunOptions_Experimental other); public native void Swap(RunOptions_Experimental other); // implements Message ---------------------------------------------- public native RunOptions_Experimental New(); public native RunOptions_Experimental New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef RunOptions_Experimental from); public native void MergeFrom(@Const @ByRef RunOptions_Experimental from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // int64 collective_graph_key = 1; public native void clear_collective_graph_key(); @MemberGetter public static native int kCollectiveGraphKeyFieldNumber(); public static final int kCollectiveGraphKeyFieldNumber = kCollectiveGraphKeyFieldNumber(); public native @Cast("google::protobuf::int64") long collective_graph_key(); public native void set_collective_graph_key(@Cast("google::protobuf::int64") long value); // bool use_run_handler_pool = 2; public native void clear_use_run_handler_pool(); @MemberGetter public static native int kUseRunHandlerPoolFieldNumber(); 
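    // Editorial usage sketch (not generated code): populating this message
    // through the accessors declared above and below; the variable name
    // `exp` and the values are illustrative.
    //
    //   RunOptions_Experimental exp = new RunOptions_Experimental();
    //   exp.set_collective_graph_key(42L);    // int64 collective_graph_key = 1
    //   exp.set_use_run_handler_pool(true);   // bool use_run_handler_pool = 2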
public static final int kUseRunHandlerPoolFieldNumber = kUseRunHandlerPoolFieldNumber(); public native @Cast("bool") boolean use_run_handler_pool(); public native void set_use_run_handler_pool(@Cast("bool") boolean value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class RunOptions extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RunOptions(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public RunOptions(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public RunOptions position(long position) { return (RunOptions)super.position(position); } public RunOptions() { super((Pointer)null); allocate(); } private native void allocate(); public RunOptions(@Const @ByRef RunOptions from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef RunOptions from); public native @ByRef @Name("operator =") RunOptions put(@Const @ByRef RunOptions from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef RunOptions default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const RunOptions internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(RunOptions other); public native void Swap(RunOptions other); // implements Message ---------------------------------------------- public native RunOptions New(); public native RunOptions New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef RunOptions from); public native void MergeFrom(@Const @ByRef RunOptions from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- @MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int NO_TRACE(); public static final int NO_TRACE = NO_TRACE(); @MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int SOFTWARE_TRACE(); 
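    // Editorial usage sketch (not generated code): selecting one of the
    // TraceLevel constants declared here and validating it; `opts` is
    // illustrative.
    //
    //   RunOptions opts = new RunOptions();
    //   opts.set_trace_level(RunOptions.FULL_TRACE);
    //   if (RunOptions.TraceLevel_IsValid(opts.trace_level())) {
    //     System.out.println(RunOptions.TraceLevel_Name(opts.trace_level()).getString());
    //   }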
public static final int SOFTWARE_TRACE = SOFTWARE_TRACE(); @MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int HARDWARE_TRACE(); public static final int HARDWARE_TRACE = HARDWARE_TRACE(); @MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int FULL_TRACE(); public static final int FULL_TRACE = FULL_TRACE(); public static native @Cast("bool") boolean TraceLevel_IsValid(int value); @MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int TraceLevel_MIN(); public static final int TraceLevel_MIN = TraceLevel_MIN(); @MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int TraceLevel_MAX(); public static final int TraceLevel_MAX = TraceLevel_MAX(); @MemberGetter public static native int TraceLevel_ARRAYSIZE(); public static final int TraceLevel_ARRAYSIZE = TraceLevel_ARRAYSIZE(); public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer TraceLevel_descriptor(); public static native @StdString BytePointer TraceLevel_Name(@Cast("tensorflow::RunOptions::TraceLevel") int value); public static native @Cast("bool") boolean TraceLevel_Parse(@StdString BytePointer name, @Cast("tensorflow::RunOptions::TraceLevel*") IntPointer value); public static native @Cast("bool") boolean TraceLevel_Parse(@StdString String name, @Cast("tensorflow::RunOptions::TraceLevel*") IntBuffer value); public static native @Cast("bool") boolean TraceLevel_Parse(@StdString BytePointer name, @Cast("tensorflow::RunOptions::TraceLevel*") int... value); public static native @Cast("bool") boolean TraceLevel_Parse(@StdString String name, @Cast("tensorflow::RunOptions::TraceLevel*") IntPointer value); public static native @Cast("bool") boolean TraceLevel_Parse(@StdString BytePointer name, @Cast("tensorflow::RunOptions::TraceLevel*") IntBuffer value); public static native @Cast("bool") boolean TraceLevel_Parse(@StdString String name, @Cast("tensorflow::RunOptions::TraceLevel*") int... 
value); // accessors ------------------------------------------------------- // .tensorflow.DebugOptions debug_options = 6; public native @Cast("bool") boolean has_debug_options(); public native void clear_debug_options(); @MemberGetter public static native int kDebugOptionsFieldNumber(); public static final int kDebugOptionsFieldNumber = kDebugOptionsFieldNumber(); public native @Const @ByRef DebugOptions debug_options(); public native DebugOptions release_debug_options(); public native DebugOptions mutable_debug_options(); public native void set_allocated_debug_options(DebugOptions debug_options); public native void unsafe_arena_set_allocated_debug_options( DebugOptions debug_options); public native DebugOptions unsafe_arena_release_debug_options(); // .tensorflow.RunOptions.Experimental experimental = 8; public native @Cast("bool") boolean has_experimental(); public native void clear_experimental(); @MemberGetter public static native int kExperimentalFieldNumber(); public static final int kExperimentalFieldNumber = kExperimentalFieldNumber(); public native @Const @ByRef RunOptions_Experimental experimental(); public native RunOptions_Experimental release_experimental(); public native RunOptions_Experimental mutable_experimental(); public native void set_allocated_experimental(RunOptions_Experimental experimental); public native void unsafe_arena_set_allocated_experimental( RunOptions_Experimental experimental); public native RunOptions_Experimental unsafe_arena_release_experimental(); // int64 timeout_in_ms = 2; public native void clear_timeout_in_ms(); @MemberGetter public static native int kTimeoutInMsFieldNumber(); public static final int kTimeoutInMsFieldNumber = kTimeoutInMsFieldNumber(); public native @Cast("google::protobuf::int64") long timeout_in_ms(); public native void set_timeout_in_ms(@Cast("google::protobuf::int64") long value); // .tensorflow.RunOptions.TraceLevel trace_level = 1; public native void clear_trace_level(); @MemberGetter public static native int kTraceLevelFieldNumber(); public static final int kTraceLevelFieldNumber = kTraceLevelFieldNumber(); public native @Cast("tensorflow::RunOptions_TraceLevel") int trace_level(); public native void set_trace_level(@Cast("tensorflow::RunOptions_TraceLevel") int value); // int32 inter_op_thread_pool = 3; public native void clear_inter_op_thread_pool(); @MemberGetter public static native int kInterOpThreadPoolFieldNumber(); public static final int kInterOpThreadPoolFieldNumber = kInterOpThreadPoolFieldNumber(); public native @Cast("google::protobuf::int32") int inter_op_thread_pool(); public native void set_inter_op_thread_pool(@Cast("google::protobuf::int32") int value); // bool output_partition_graphs = 5; public native void clear_output_partition_graphs(); @MemberGetter public static native int kOutputPartitionGraphsFieldNumber(); public static final int kOutputPartitionGraphsFieldNumber = kOutputPartitionGraphsFieldNumber(); public native @Cast("bool") boolean output_partition_graphs(); public native void set_output_partition_graphs(@Cast("bool") boolean value); // bool report_tensor_allocations_upon_oom = 7; public native void clear_report_tensor_allocations_upon_oom(); @MemberGetter public static native int kReportTensorAllocationsUponOomFieldNumber(); public static final int kReportTensorAllocationsUponOomFieldNumber = kReportTensorAllocationsUponOomFieldNumber(); public native @Cast("bool") boolean report_tensor_allocations_upon_oom(); public native void set_report_tensor_allocations_upon_oom(@Cast("bool") boolean 
value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class RunMetadata extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RunMetadata(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public RunMetadata(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public RunMetadata position(long position) { return (RunMetadata)super.position(position); } public RunMetadata() { super((Pointer)null); allocate(); } private native void allocate(); public RunMetadata(@Const @ByRef RunMetadata from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef RunMetadata from); public native @ByRef @Name("operator =") RunMetadata put(@Const @ByRef RunMetadata from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef RunMetadata default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const RunMetadata internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(RunMetadata other); public native void Swap(RunMetadata other); // implements Message ---------------------------------------------- public native RunMetadata New(); public native RunMetadata New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef RunMetadata from); public native void MergeFrom(@Const @ByRef RunMetadata from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.GraphDef partition_graphs = 3; public native int partition_graphs_size(); public native void clear_partition_graphs(); @MemberGetter public static native int kPartitionGraphsFieldNumber(); public static final int kPartitionGraphsFieldNumber = kPartitionGraphsFieldNumber(); public native GraphDef mutable_partition_graphs(int index); public native @Const @ByRef 
GraphDef partition_graphs(int index); public native GraphDef add_partition_graphs(); // .tensorflow.StepStats step_stats = 1; public native @Cast("bool") boolean has_step_stats(); public native void clear_step_stats(); @MemberGetter public static native int kStepStatsFieldNumber(); public static final int kStepStatsFieldNumber = kStepStatsFieldNumber(); public native @Const @ByRef StepStats step_stats(); public native StepStats release_step_stats(); public native StepStats mutable_step_stats(); public native void set_allocated_step_stats(StepStats step_stats); public native void unsafe_arena_set_allocated_step_stats( StepStats step_stats); public native StepStats unsafe_arena_release_step_stats(); // .tensorflow.CostGraphDef cost_graph = 2; public native @Cast("bool") boolean has_cost_graph(); public native void clear_cost_graph(); @MemberGetter public static native int kCostGraphFieldNumber(); public static final int kCostGraphFieldNumber = kCostGraphFieldNumber(); public native @Const @ByRef CostGraphDef cost_graph(); public native CostGraphDef release_cost_graph(); public native CostGraphDef mutable_cost_graph(); public native void set_allocated_cost_graph(CostGraphDef cost_graph); public native void unsafe_arena_set_allocated_cost_graph( CostGraphDef cost_graph); public native CostGraphDef unsafe_arena_release_cost_graph(); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class TensorConnection extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorConnection(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorConnection(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorConnection position(long position) { return (TensorConnection)super.position(position); } public TensorConnection() { super((Pointer)null); allocate(); } private native void allocate(); public TensorConnection(@Const @ByRef TensorConnection from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorConnection from); public native @ByRef @Name("operator =") TensorConnection put(@Const @ByRef TensorConnection from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorConnection default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorConnection internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorConnection other); public native void Swap(TensorConnection other); // implements Message ---------------------------------------------- public native TensorConnection New(); public native TensorConnection New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorConnection from); public native void MergeFrom(@Const @ByRef TensorConnection from); public native void Clear(); public native @Cast("bool") boolean 
IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string from_tensor = 1; public native void clear_from_tensor(); @MemberGetter public static native int kFromTensorFieldNumber(); public static final int kFromTensorFieldNumber = kFromTensorFieldNumber(); public native @StdString BytePointer from_tensor(); public native void set_from_tensor(@StdString BytePointer value); public native void set_from_tensor(@StdString String value); // #if LANG_CXX11 // #endif public native void set_from_tensor(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_from_tensor(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_from_tensor(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_from_tensor(); public native void set_allocated_from_tensor(@StdString @Cast({"char*", "std::string*"}) BytePointer from_tensor); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_from_tensor(); public native @Deprecated void unsafe_arena_set_allocated_from_tensor( @StdString @Cast({"char*", "std::string*"}) BytePointer from_tensor); // string to_tensor = 2; public native void clear_to_tensor(); @MemberGetter public static native int kToTensorFieldNumber(); public static final int kToTensorFieldNumber = kToTensorFieldNumber(); public native @StdString BytePointer to_tensor(); public native void set_to_tensor(@StdString BytePointer value); public native void set_to_tensor(@StdString String value); // #if LANG_CXX11 // #endif public native void set_to_tensor(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_to_tensor(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_to_tensor(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_to_tensor(); public native void set_allocated_to_tensor(@StdString @Cast({"char*", "std::string*"}) BytePointer to_tensor); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_to_tensor(); public native @Deprecated void unsafe_arena_set_allocated_to_tensor( @StdString @Cast({"char*", "std::string*"}) BytePointer to_tensor); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- 
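// Editorial usage sketch (not generated code): TensorConnection above is
// typically populated through the CallableOptions message declared below;
// the names `callable` and `tc` and the tensor names are illustrative.
//
//   CallableOptions callable = new CallableOptions();
//   callable.add_feed("input:0");    // repeated string feed = 1
//   callable.add_fetch("output:0");  // repeated string fetch = 2
//   callable.add_target("init_op");  // repeated string target = 3
//   TensorConnection tc = callable.add_tensor_connection();
//   tc.set_from_tensor("a:0");       // feed this fetched tensor...
//   tc.set_to_tensor("b:0");         // ...into this input tensor
//   callable.mutable_run_options().set_trace_level(RunOptions.FULL_TRACE);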
@Namespace("tensorflow") @NoOffset public static class CallableOptions extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CallableOptions(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public CallableOptions(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public CallableOptions position(long position) { return (CallableOptions)super.position(position); } public CallableOptions() { super((Pointer)null); allocate(); } private native void allocate(); public CallableOptions(@Const @ByRef CallableOptions from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef CallableOptions from); public native @ByRef @Name("operator =") CallableOptions put(@Const @ByRef CallableOptions from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef CallableOptions default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const CallableOptions internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(CallableOptions other); public native void Swap(CallableOptions other); // implements Message ---------------------------------------------- public native CallableOptions New(); public native CallableOptions New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef CallableOptions from); public native void MergeFrom(@Const @ByRef CallableOptions from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated string feed = 1; public native int feed_size(); public native void clear_feed(); @MemberGetter public static native int kFeedFieldNumber(); public static final int kFeedFieldNumber = kFeedFieldNumber(); public native @StdString BytePointer feed(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_feed(int index); public native void 
    set_feed(int index, @StdString BytePointer value);
    public native void set_feed(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_feed(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_feed(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_feed();
    public native void add_feed(@StdString BytePointer value);
    public native void add_feed(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_feed(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_feed(String value, @Cast("size_t") long size);

    // repeated string fetch = 2;
    public native int fetch_size();
    public native void clear_fetch();
    @MemberGetter public static native int kFetchFieldNumber();
    public static final int kFetchFieldNumber = kFetchFieldNumber();
    public native @StdString BytePointer fetch(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_fetch(int index);
    public native void set_fetch(int index, @StdString BytePointer value);
    public native void set_fetch(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_fetch(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_fetch(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_fetch();
    public native void add_fetch(@StdString BytePointer value);
    public native void add_fetch(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_fetch(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_fetch(String value, @Cast("size_t") long size);

    // repeated string target = 3;
    public native int target_size();
    public native void clear_target();
    @MemberGetter public static native int kTargetFieldNumber();
    public static final int kTargetFieldNumber = kTargetFieldNumber();
    public native @StdString BytePointer target(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_target(int index);
    public native void set_target(int index, @StdString BytePointer value);
    public native void set_target(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_target(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_target(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_target();
    public native void add_target(@StdString BytePointer value);
    public native void add_target(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_target(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_target(String value, @Cast("size_t") long size);

    // repeated .tensorflow.TensorConnection tensor_connection = 5;
    public native int tensor_connection_size();
    public native void clear_tensor_connection();
    @MemberGetter public static native int kTensorConnectionFieldNumber();
    public static final int kTensorConnectionFieldNumber = kTensorConnectionFieldNumber();
    public native TensorConnection mutable_tensor_connection(int index);
    public native @Const @ByRef TensorConnection tensor_connection(int index);
    public native TensorConnection add_tensor_connection();

    // map<string, string> feed_devices = 6;
    public native int feed_devices_size();
    public native void clear_feed_devices();
    @MemberGetter public static native int kFeedDevicesFieldNumber();
    public static final int kFeedDevicesFieldNumber = kFeedDevicesFieldNumber();
    public native @Const @ByRef StringStringMap feed_devices();
    public native StringStringMap mutable_feed_devices();

    // map<string, string> fetch_devices = 7;
    public native int fetch_devices_size();
    public native void clear_fetch_devices();
    @MemberGetter public static native int kFetchDevicesFieldNumber();
    public static final int kFetchDevicesFieldNumber = kFetchDevicesFieldNumber();
    public native @Const @ByRef StringStringMap fetch_devices();
    public native StringStringMap mutable_fetch_devices();

    // .tensorflow.RunOptions run_options = 4;
    public native @Cast("bool") boolean has_run_options();
    public native void clear_run_options();
    @MemberGetter public static native int kRunOptionsFieldNumber();
    public static final int kRunOptionsFieldNumber = kRunOptionsFieldNumber();
    public native @Const @ByRef RunOptions run_options();
    public native RunOptions release_run_options();
    public native RunOptions mutable_run_options();
    public native void set_allocated_run_options(RunOptions run_options);
    public native void unsafe_arena_set_allocated_run_options(RunOptions run_options);
    public native RunOptions unsafe_arena_release_run_options();

    // bool fetch_skip_sync = 8;
    public native void clear_fetch_skip_sync();
    @MemberGetter public static native int kFetchSkipSyncFieldNumber();
    public static final int kFetchSkipSyncFieldNumber = kFetchSkipSyncFieldNumber();
    public native @Cast("bool") boolean fetch_skip_sync();
    public native void set_fetch_skip_sync(@Cast("bool") boolean value);
}

// ===================================================================
// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif // __GNUC__

// GPUOptions_Experimental_VirtualDevices
// repeated float memory_limit_mb = 1;
// -------------------------------------------------------------------
// GPUOptions_Experimental
// repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
// bool use_unified_memory = 2;
// int32 num_dev_to_dev_copy_streams = 3;
// -------------------------------------------------------------------
// GPUOptions
// double per_process_gpu_memory_fraction = 1;
// bool allow_growth = 4;
// string allocator_type = 2;
// #if LANG_CXX11
// #endif
// int64 deferred_deletion_bytes = 3;
// string visible_device_list = 5;
// #if LANG_CXX11
// #endif
// int32 polling_active_delay_usecs = 6;
// int32 polling_inactive_delay_msecs = 7;
// bool force_gpu_compatible = 8;
// .tensorflow.GPUOptions.Experimental experimental = 9;
// -------------------------------------------------------------------
// OptimizerOptions
// bool do_common_subexpression_elimination = 1;
// bool do_constant_folding = 2;
// int64 max_folded_constant_in_bytes = 6;
// bool do_function_inlining = 4;
// .tensorflow.OptimizerOptions.Level opt_level = 3;
// .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
// -------------------------------------------------------------------
// GraphOptions
// bool enable_recv_scheduling = 2;
// .tensorflow.OptimizerOptions optimizer_options = 3;
// int64 build_cost_model = 4;
// int64 build_cost_model_after = 9;
// bool infer_shapes = 5;
// bool place_pruned_graph = 6;
// bool enable_bfloat16_sendrecv = 7;
// int32 timeline_step = 8;
// .tensorflow.RewriterConfig rewrite_options = 10;
// -------------------------------------------------------------------
// ThreadPoolOptionProto
// int32 num_threads = 1;
// string global_name = 2;
// #if LANG_CXX11
// #endif
// -------------------------------------------------------------------
// RPCOptions
// bool use_rpc_for_inprocess_master = 1;
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// ConfigProto_Experimental
// string collective_group_leader = 1;
// #if LANG_CXX11
// #endif
// string executor_type = 3;
// #if LANG_CXX11
// #endif
// -------------------------------------------------------------------
// ConfigProto
// map<string, int32> device_count = 1;
// int32 intra_op_parallelism_threads = 2;
// int32 inter_op_parallelism_threads = 5;
// bool use_per_session_threads = 9;
// repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
// int32 placement_period = 3;
// repeated string device_filters = 4;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif
// .tensorflow.GPUOptions gpu_options = 6;
// bool allow_soft_placement = 7;
// bool log_device_placement = 8;
// .tensorflow.GraphOptions graph_options = 10;
// int64 operation_timeout_in_ms = 11;
// .tensorflow.RPCOptions rpc_options = 13;
// .tensorflow.ClusterDef cluster_def = 14;
// bool isolate_session_state = 15;
// .tensorflow.ConfigProto.Experimental experimental = 16;
// -------------------------------------------------------------------
// RunOptions_Experimental
// int64 collective_graph_key = 1;
// bool use_run_handler_pool = 2;
// -------------------------------------------------------------------
// RunOptions
// .tensorflow.RunOptions.TraceLevel trace_level = 1;
// int64 timeout_in_ms = 2;
// int32 inter_op_thread_pool = 3;
// bool output_partition_graphs = 5;
// .tensorflow.DebugOptions debug_options = 6;
// bool report_tensor_allocations_upon_oom = 7;
// .tensorflow.RunOptions.Experimental experimental = 8;
// -------------------------------------------------------------------
// RunMetadata
// .tensorflow.StepStats step_stats = 1;
// .tensorflow.CostGraphDef cost_graph = 2;
// repeated .tensorflow.GraphDef partition_graphs = 3;
// -------------------------------------------------------------------
// TensorConnection
// string from_tensor = 1;
// #if LANG_CXX11
// #endif
// string to_tensor = 2;
// #if LANG_CXX11
// #endif
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// CallableOptions
// repeated string feed = 1;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif
// repeated string fetch = 2;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif
// repeated string target = 3;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif
// .tensorflow.RunOptions run_options = 4;
// repeated .tensorflow.TensorConnection tensor_connection = 5;
// map<string, string> feed_devices = 6;
// map<string, string> fetch_devices = 7;
// bool fetch_skip_sync = 8;
// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif // __GNUC__
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// namespace protobuf
// namespace google
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto

// Parsed from tensorflow/core/framework/cost_graph.pb.h

// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/cost_graph.proto
// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include // IWYU pragma: export
// #include // IWYU pragma: export
// #include
// #include "tensorflow/core/framework/tensor_shape.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto
// Internal implementation detail -- do not use these members.
// namespace protobuf_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class CostGraphDef_Node_InputInfo extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CostGraphDef_Node_InputInfo(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ public CostGraphDef_Node_InputInfo(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public CostGraphDef_Node_InputInfo position(long position) { return (CostGraphDef_Node_InputInfo)super.position(position); } public CostGraphDef_Node_InputInfo() { super((Pointer)null); allocate(); } private native void allocate(); public CostGraphDef_Node_InputInfo(@Const @ByRef CostGraphDef_Node_InputInfo from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef CostGraphDef_Node_InputInfo from); public native @ByRef @Name("operator =") CostGraphDef_Node_InputInfo put(@Const @ByRef CostGraphDef_Node_InputInfo from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef CostGraphDef_Node_InputInfo default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const CostGraphDef_Node_InputInfo internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(CostGraphDef_Node_InputInfo other); public native void Swap(CostGraphDef_Node_InputInfo other); // implements Message ---------------------------------------------- public native CostGraphDef_Node_InputInfo New(); public native CostGraphDef_Node_InputInfo New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef CostGraphDef_Node_InputInfo from); public native void MergeFrom(@Const @ByRef CostGraphDef_Node_InputInfo from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // int32 preceding_node = 1; public native void clear_preceding_node(); @MemberGetter public static native int kPrecedingNodeFieldNumber(); public static final int kPrecedingNodeFieldNumber = kPrecedingNodeFieldNumber(); public native @Cast("google::protobuf::int32") int preceding_node(); public native void set_preceding_node(@Cast("google::protobuf::int32") int value); // int32 preceding_port = 2; public native void clear_preceding_port(); @MemberGetter public static native int 
kPrecedingPortFieldNumber(); public static final int kPrecedingPortFieldNumber = kPrecedingPortFieldNumber(); public native @Cast("google::protobuf::int32") int preceding_port(); public native void set_preceding_port(@Cast("google::protobuf::int32") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class CostGraphDef_Node_OutputInfo extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CostGraphDef_Node_OutputInfo(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public CostGraphDef_Node_OutputInfo(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public CostGraphDef_Node_OutputInfo position(long position) { return (CostGraphDef_Node_OutputInfo)super.position(position); } public CostGraphDef_Node_OutputInfo() { super((Pointer)null); allocate(); } private native void allocate(); public CostGraphDef_Node_OutputInfo(@Const @ByRef CostGraphDef_Node_OutputInfo from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef CostGraphDef_Node_OutputInfo from); public native @ByRef @Name("operator =") CostGraphDef_Node_OutputInfo put(@Const @ByRef CostGraphDef_Node_OutputInfo from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef CostGraphDef_Node_OutputInfo default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const CostGraphDef_Node_OutputInfo internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(CostGraphDef_Node_OutputInfo other); public native void Swap(CostGraphDef_Node_OutputInfo other); // implements Message ---------------------------------------------- public native CostGraphDef_Node_OutputInfo New(); public native CostGraphDef_Node_OutputInfo New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef CostGraphDef_Node_OutputInfo from); public native void MergeFrom(@Const @ByRef CostGraphDef_Node_OutputInfo from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal 
@Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.TensorShapeProto shape = 3; public native @Cast("bool") boolean has_shape(); public native void clear_shape(); @MemberGetter public static native int kShapeFieldNumber(); public static final int kShapeFieldNumber = kShapeFieldNumber(); public native @Const @ByRef TensorShapeProto shape(); public native TensorShapeProto release_shape(); public native TensorShapeProto mutable_shape(); public native void set_allocated_shape(TensorShapeProto shape); public native void unsafe_arena_set_allocated_shape( TensorShapeProto shape); public native TensorShapeProto unsafe_arena_release_shape(); // int64 size = 1; public native void clear_size(); @MemberGetter public static native int kSizeFieldNumber(); public static final int kSizeFieldNumber = kSizeFieldNumber(); public native @Cast("google::protobuf::int64") long size(); public native void set_size(@Cast("google::protobuf::int64") long value); // int64 alias_input_port = 2; public native void clear_alias_input_port(); @MemberGetter public static native int kAliasInputPortFieldNumber(); public static final int kAliasInputPortFieldNumber = kAliasInputPortFieldNumber(); public native @Cast("google::protobuf::int64") long alias_input_port(); public native void set_alias_input_port(@Cast("google::protobuf::int64") long value); // .tensorflow.DataType dtype = 4; public native void clear_dtype(); @MemberGetter public static native int kDtypeFieldNumber(); public static final int kDtypeFieldNumber = kDtypeFieldNumber(); public native @Cast("tensorflow::DataType") int dtype(); public native void set_dtype(@Cast("tensorflow::DataType") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class CostGraphDef_Node extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CostGraphDef_Node(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public CostGraphDef_Node(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public CostGraphDef_Node position(long position) { return (CostGraphDef_Node)super.position(position); } public CostGraphDef_Node() { super((Pointer)null); allocate(); } private native void allocate(); public CostGraphDef_Node(@Const @ByRef CostGraphDef_Node from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef CostGraphDef_Node from); public native @ByRef @Name("operator =") CostGraphDef_Node put(@Const @ByRef CostGraphDef_Node from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef CostGraphDef_Node default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const CostGraphDef_Node internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(CostGraphDef_Node other); public native void Swap(CostGraphDef_Node other); // implements Message ---------------------------------------------- public native CostGraphDef_Node New(); public native CostGraphDef_Node New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef CostGraphDef_Node from); public native void MergeFrom(@Const @ByRef CostGraphDef_Node from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.CostGraphDef.Node.InputInfo input_info = 4; public native int input_info_size(); public native void clear_input_info(); @MemberGetter public static native int kInputInfoFieldNumber(); public static final int kInputInfoFieldNumber = kInputInfoFieldNumber(); public native CostGraphDef_Node_InputInfo mutable_input_info(int index); public native @Const @ByRef CostGraphDef_Node_InputInfo input_info(int index); public native CostGraphDef_Node_InputInfo add_input_info(); // repeated .tensorflow.CostGraphDef.Node.OutputInfo output_info = 5; public native int output_info_size(); public native void clear_output_info(); @MemberGetter public static 
          native int kOutputInfoFieldNumber();
    public static final int kOutputInfoFieldNumber = kOutputInfoFieldNumber();
    public native CostGraphDef_Node_OutputInfo mutable_output_info(int index);
    public native @Const @ByRef CostGraphDef_Node_OutputInfo output_info(int index);
    public native CostGraphDef_Node_OutputInfo add_output_info();

    // repeated int32 control_input = 8;
    public native int control_input_size();
    public native void clear_control_input();
    @MemberGetter public static native int kControlInputFieldNumber();
    public static final int kControlInputFieldNumber = kControlInputFieldNumber();
    public native @Cast("google::protobuf::int32") int control_input(int index);
    public native void set_control_input(int index, @Cast("google::protobuf::int32") int value);
    public native void add_control_input(@Cast("google::protobuf::int32") int value);

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(
          @StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // string device = 2;
    public native void clear_device();
    @MemberGetter public static native int kDeviceFieldNumber();
    public static final int kDeviceFieldNumber = kDeviceFieldNumber();
    public native @StdString BytePointer device();
    public native void set_device(@StdString BytePointer value);
    public native void set_device(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_device(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_device(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device();
    public native void set_allocated_device(@StdString @Cast({"char*", "std::string*"}) BytePointer device);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_device();
    public native @Deprecated void unsafe_arena_set_allocated_device(
          @StdString @Cast({"char*", "std::string*"}) BytePointer device);

    // int64 temporary_memory_size = 6;
    public native void clear_temporary_memory_size();
    @MemberGetter public static native int kTemporaryMemorySizeFieldNumber();
    public static final int kTemporaryMemorySizeFieldNumber = kTemporaryMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") long temporary_memory_size();
    public native void set_temporary_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 compute_cost = 9;
    public native void clear_compute_cost();
    @MemberGetter public static native int kComputeCostFieldNumber();
    public static final int kComputeCostFieldNumber = kComputeCostFieldNumber();
    public native @Cast("google::protobuf::int64") long compute_cost();
    public native void set_compute_cost(@Cast("google::protobuf::int64") long value);

    // int32 id = 3;
    public native void clear_id();
    @MemberGetter public static native int kIdFieldNumber();
    public static final int kIdFieldNumber = kIdFieldNumber();
    public native @Cast("google::protobuf::int32") int id();
    public native void set_id(@Cast("google::protobuf::int32") int value);

    // bool is_final = 7;
    public native void clear_is_final();
    @MemberGetter public static native int kIsFinalFieldNumber();
    public static final int kIsFinalFieldNumber = kIsFinalFieldNumber();
    public native @Cast("bool") boolean is_final();
    public native void set_is_final(@Cast("bool") boolean value);

    // bool inaccurate = 17;
    public native void clear_inaccurate();
    @MemberGetter public static native int kInaccurateFieldNumber();
    public static final int kInaccurateFieldNumber = kInaccurateFieldNumber();
    public native @Cast("bool") boolean inaccurate();
    public native void set_inaccurate(@Cast("bool") boolean value);

    // int64 host_temp_memory_size = 10 [deprecated = true];
    public native @Deprecated void clear_host_temp_memory_size();
    @MemberGetter public static native @Deprecated int kHostTempMemorySizeFieldNumber();
    public static final int kHostTempMemorySizeFieldNumber = kHostTempMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") @Deprecated long host_temp_memory_size();
    public native @Deprecated void set_host_temp_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 device_temp_memory_size = 11 [deprecated = true];
    public native @Deprecated void clear_device_temp_memory_size();
    @MemberGetter public static native @Deprecated int kDeviceTempMemorySizeFieldNumber();
    public static final int kDeviceTempMemorySizeFieldNumber = kDeviceTempMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") @Deprecated long device_temp_memory_size();
    public native @Deprecated void set_device_temp_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 persistent_memory_size = 12;
    public native void clear_persistent_memory_size();
    @MemberGetter public static native int kPersistentMemorySizeFieldNumber();
    public static final int kPersistentMemorySizeFieldNumber = kPersistentMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") long persistent_memory_size();
    public native void set_persistent_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 compute_time = 14;
    public native void clear_compute_time();
    @MemberGetter public static native int kComputeTimeFieldNumber();
    public static final int kComputeTimeFieldNumber = kComputeTimeFieldNumber();
    public native @Cast("google::protobuf::int64") long compute_time();
    public native void set_compute_time(@Cast("google::protobuf::int64") long value);

    // int64 memory_time = 15;
    public native void clear_memory_time();
    @MemberGetter public static native int kMemoryTimeFieldNumber();
    public static final int kMemoryTimeFieldNumber = kMemoryTimeFieldNumber();
    public native @Cast("google::protobuf::int64") long memory_time();
    public native void set_memory_time(@Cast("google::protobuf::int64") long value);

    // int64 device_persistent_memory_size = 16 [deprecated = true];
    public native @Deprecated void clear_device_persistent_memory_size();
    @MemberGetter public static native @Deprecated int kDevicePersistentMemorySizeFieldNumber();
    public static final int kDevicePersistentMemorySizeFieldNumber = kDevicePersistentMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") @Deprecated long device_persistent_memory_size();
    public native @Deprecated void set_device_persistent_memory_size(@Cast("google::protobuf::int64") long value);
}

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class CostGraphDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CostGraphDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public CostGraphDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public CostGraphDef position(long position) { return (CostGraphDef)super.position(position); }
    public CostGraphDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public CostGraphDef(@Const @ByRef CostGraphDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef CostGraphDef from);

    public native @ByRef @Name("operator =") CostGraphDef put(@Const @ByRef CostGraphDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef CostGraphDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const CostGraphDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(CostGraphDef other);
    public native void Swap(CostGraphDef other);

    // implements Message ----------------------------------------------

    public native CostGraphDef New();
    public native CostGraphDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef CostGraphDef from);
    public native void MergeFrom(@Const @ByRef CostGraphDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.CostGraphDef.Node node = 1;
    public native int node_size();
    public native void clear_node();
    @MemberGetter public static native int kNodeFieldNumber();
    public static final int kNodeFieldNumber = kNodeFieldNumber();
    public native CostGraphDef_Node mutable_node(int index);
    public native @Const @ByRef CostGraphDef_Node node(int index);
    public native CostGraphDef_Node add_node();
}

// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__

// CostGraphDef_Node_InputInfo
// int32 preceding_node = 1;
// int32 preceding_port = 2;

// -------------------------------------------------------------------

// CostGraphDef_Node_OutputInfo
// int64 size = 1;
// int64 alias_input_port = 2;
// .tensorflow.TensorShapeProto shape = 3;
// .tensorflow.DataType dtype = 4;

// -------------------------------------------------------------------

// CostGraphDef_Node
// string name = 1;
// #if LANG_CXX11
// #endif
// string device = 2;
// #if LANG_CXX11
// #endif
// int32 id = 3;
// repeated .tensorflow.CostGraphDef.Node.InputInfo input_info = 4;
// repeated .tensorflow.CostGraphDef.Node.OutputInfo output_info = 5;
// int64 temporary_memory_size = 6;
// int64 persistent_memory_size = 12;
// int64 host_temp_memory_size = 10 [deprecated = true];
// int64 device_temp_memory_size = 11 [deprecated = true];
// int64 device_persistent_memory_size = 16 [deprecated = true];
// int64 compute_cost = 9;
// int64 compute_time = 14;
// int64 memory_time = 15;
// bool is_final = 7;
// repeated int32 control_input = 8;
// bool inaccurate = 17;

// -------------------------------------------------------------------

// CostGraphDef
// repeated .tensorflow.CostGraphDef.Node node = 1;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif  // __GNUC__

// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow

// @@protoc_insertion_point(global_scope)

// #endif  // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto
// Parsed from tensorflow/core/framework/step_stats.pb.h

// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/step_stats.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include // IWYU pragma: export
// #include // IWYU pragma: export
// #include
// #include "tensorflow/core/framework/allocation_description.pb.h"
// #include "tensorflow/core/framework/tensor_description.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto

// Internal implementation detail -- do not use these members.
// namespace protobuf_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class AllocationRecord extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AllocationRecord(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public AllocationRecord(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public AllocationRecord position(long position) { return (AllocationRecord)super.position(position); }
    public AllocationRecord() { super((Pointer)null); allocate(); }
    private native void allocate();
    public AllocationRecord(@Const @ByRef AllocationRecord from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef AllocationRecord from);

    public native @ByRef @Name("operator =") AllocationRecord put(@Const @ByRef AllocationRecord from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef AllocationRecord default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const AllocationRecord internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(AllocationRecord other);
    public native void Swap(AllocationRecord other);

    // implements Message ----------------------------------------------

    public native AllocationRecord New();
    public native AllocationRecord New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef AllocationRecord from);
    public native void MergeFrom(@Const @ByRef AllocationRecord from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // int64 alloc_micros = 1;
    public native void clear_alloc_micros();
    @MemberGetter public static native int kAllocMicrosFieldNumber();
    public static final int kAllocMicrosFieldNumber = kAllocMicrosFieldNumber();
    public native @Cast("google::protobuf::int64") long alloc_micros();
    public native void set_alloc_micros(@Cast("google::protobuf::int64") long value);

    // int64 alloc_bytes = 2;
    public native void clear_alloc_bytes();
    @MemberGetter public static native int kAllocBytesFieldNumber();
    public static final int kAllocBytesFieldNumber = kAllocBytesFieldNumber();
    public native @Cast("google::protobuf::int64") long alloc_bytes();
    public native void set_alloc_bytes(@Cast("google::protobuf::int64") long value);
}

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class AllocatorMemoryUsed extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AllocatorMemoryUsed(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public AllocatorMemoryUsed(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public AllocatorMemoryUsed position(long position) { return (AllocatorMemoryUsed)super.position(position); }
    public AllocatorMemoryUsed() { super((Pointer)null); allocate(); }
    private native void allocate();
    public AllocatorMemoryUsed(@Const @ByRef AllocatorMemoryUsed from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef AllocatorMemoryUsed from);

    public native @ByRef @Name("operator =") AllocatorMemoryUsed put(@Const @ByRef AllocatorMemoryUsed from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef AllocatorMemoryUsed default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const AllocatorMemoryUsed internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(AllocatorMemoryUsed other);
    public native void Swap(AllocatorMemoryUsed other);

    // implements Message ----------------------------------------------

    public native AllocatorMemoryUsed New();
    public native AllocatorMemoryUsed New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef AllocatorMemoryUsed from);
    public native void MergeFrom(@Const @ByRef AllocatorMemoryUsed from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.AllocationRecord allocation_records = 6;
    public native int allocation_records_size();
    public native void clear_allocation_records();
    @MemberGetter public static native int kAllocationRecordsFieldNumber();
    public static final int kAllocationRecordsFieldNumber = kAllocationRecordsFieldNumber();
    public native AllocationRecord mutable_allocation_records(int index);
    public native @Const @ByRef AllocationRecord allocation_records(int index);
    public native AllocationRecord add_allocation_records();

    // string allocator_name = 1;
    public native void clear_allocator_name();
    @MemberGetter public static native int kAllocatorNameFieldNumber();
    public static final int kAllocatorNameFieldNumber = kAllocatorNameFieldNumber();
    public native @StdString BytePointer allocator_name();
    public native void set_allocator_name(@StdString BytePointer value);
    public native void set_allocator_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_allocator_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_allocator_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_allocator_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_allocator_name();
    public native void set_allocated_allocator_name(@StdString @Cast({"char*", "std::string*"}) BytePointer allocator_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_allocator_name();
    public native @Deprecated void unsafe_arena_set_allocated_allocator_name(
          @StdString @Cast({"char*", "std::string*"}) BytePointer allocator_name);

    // int64 total_bytes = 2;
    public native void clear_total_bytes();
    @MemberGetter public static native int kTotalBytesFieldNumber();
    public static final int kTotalBytesFieldNumber = kTotalBytesFieldNumber();
    public native @Cast("google::protobuf::int64") long total_bytes();
    public native void set_total_bytes(@Cast("google::protobuf::int64") long value);

    // int64 peak_bytes = 3;
    public native void clear_peak_bytes();
    @MemberGetter public static native int kPeakBytesFieldNumber();
    public static final int kPeakBytesFieldNumber = kPeakBytesFieldNumber();
    public native @Cast("google::protobuf::int64") long peak_bytes();
    public native void set_peak_bytes(@Cast("google::protobuf::int64") long value);

    // int64 live_bytes = 4;
    public native void clear_live_bytes();
    @MemberGetter public static native int kLiveBytesFieldNumber();
    public static final int kLiveBytesFieldNumber = kLiveBytesFieldNumber();
    public native @Cast("google::protobuf::int64") long live_bytes();
    public native void set_live_bytes(@Cast("google::protobuf::int64") long value);

    // int64 allocator_bytes_in_use = 5;
    public native void clear_allocator_bytes_in_use();
    @MemberGetter public static native int kAllocatorBytesInUseFieldNumber();
    public static final int kAllocatorBytesInUseFieldNumber = kAllocatorBytesInUseFieldNumber();
    public native @Cast("google::protobuf::int64") long allocator_bytes_in_use();
    public native void set_allocator_bytes_in_use(@Cast("google::protobuf::int64") long value);
}
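// [Editor's note] Usage sketch, not part of the generated bindings: populating
// an AllocatorMemoryUsed message by hand. add_allocation_records() returns a
// mutable child message that can be filled in place; all values here are
// illustrative.
public static AllocatorMemoryUsed exampleAllocatorMemoryUsed() {
    AllocatorMemoryUsed used = new AllocatorMemoryUsed();
    used.set_allocator_name("cpu");  // String overload declared above
    used.set_total_bytes(1 << 20);
    used.set_peak_bytes(1 << 20);
    used.set_live_bytes(1 << 19);
    AllocationRecord record = used.add_allocation_records();
    record.set_alloc_micros(0L);
    record.set_alloc_bytes(1 << 20);
    return used;
}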
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class NodeOutput extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeOutput(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public NodeOutput(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public NodeOutput position(long position) { return (NodeOutput)super.position(position); }
    public NodeOutput() { super((Pointer)null); allocate(); }
    private native void allocate();
    public NodeOutput(@Const @ByRef NodeOutput from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef NodeOutput from);

    public native @ByRef @Name("operator =") NodeOutput put(@Const @ByRef NodeOutput from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef NodeOutput default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const NodeOutput internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(NodeOutput other);
    public native void Swap(NodeOutput other);

    // implements Message ----------------------------------------------

    public native NodeOutput New();
    public native NodeOutput New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef NodeOutput from);
    public native void MergeFrom(@Const @ByRef NodeOutput from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // .tensorflow.TensorDescription tensor_description = 3;
    public native @Cast("bool") boolean has_tensor_description();
    public native void clear_tensor_description();
    @MemberGetter public static native int kTensorDescriptionFieldNumber();
    public static final int kTensorDescriptionFieldNumber = kTensorDescriptionFieldNumber();
    public native @Const @ByRef TensorDescription tensor_description();
    public native TensorDescription release_tensor_description();
    public native TensorDescription mutable_tensor_description();
    public native void set_allocated_tensor_description(TensorDescription tensor_description);
    public native void unsafe_arena_set_allocated_tensor_description(
          TensorDescription tensor_description);
    public native TensorDescription unsafe_arena_release_tensor_description();

    // int32 slot = 1;
    public native void clear_slot();
    @MemberGetter public static native int kSlotFieldNumber();
    public static final int kSlotFieldNumber = kSlotFieldNumber();
    public native @Cast("google::protobuf::int32") int slot();
    public native void set_slot(@Cast("google::protobuf::int32") int value);
}
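// [Editor's note] Usage sketch, not part of the generated bindings: the
// tensor_description submessage is optional, so reads should be guarded with
// has_tensor_description(). The dtype() accessor on TensorDescription is
// assumed to follow the same generated pattern as the other dtype fields in
// this file (an int-valued DataType).
public static void describeNodeOutput(NodeOutput output) {
    System.out.println("slot=" + output.slot());
    if (output.has_tensor_description()) {
        TensorDescription description = output.tensor_description();  // const view
        System.out.println("dtype=" + description.dtype());
    }
}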
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class MemoryStats extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MemoryStats(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public MemoryStats(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public MemoryStats position(long position) { return (MemoryStats)super.position(position); }
    public MemoryStats() { super((Pointer)null); allocate(); }
    private native void allocate();
    public MemoryStats(@Const @ByRef MemoryStats from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef MemoryStats from);

    public native @ByRef @Name("operator =") MemoryStats put(@Const @ByRef MemoryStats from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef MemoryStats default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const MemoryStats internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(MemoryStats other);
    public native void Swap(MemoryStats other);

    // implements Message ----------------------------------------------

    public native MemoryStats New();
    public native MemoryStats New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef MemoryStats from);
    public native void MergeFrom(@Const @ByRef MemoryStats from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated int64 persistent_tensor_alloc_ids = 5;
    public native int persistent_tensor_alloc_ids_size();
    public native void clear_persistent_tensor_alloc_ids();
    @MemberGetter public static native int kPersistentTensorAllocIdsFieldNumber();
    public static final int kPersistentTensorAllocIdsFieldNumber = kPersistentTensorAllocIdsFieldNumber();
    public native @Cast("google::protobuf::int64") long persistent_tensor_alloc_ids(int index);
    public native void set_persistent_tensor_alloc_ids(int index, @Cast("google::protobuf::int64") long value);
    public native void add_persistent_tensor_alloc_ids(@Cast("google::protobuf::int64") long value);

    // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
    public native @Deprecated int device_persistent_tensor_alloc_ids_size();
    public native @Deprecated void clear_device_persistent_tensor_alloc_ids();
    @MemberGetter public static native @Deprecated int kDevicePersistentTensorAllocIdsFieldNumber();
    public static final int kDevicePersistentTensorAllocIdsFieldNumber = kDevicePersistentTensorAllocIdsFieldNumber();
    public native @Cast("google::protobuf::int64") @Deprecated long device_persistent_tensor_alloc_ids(int index);
    public native @Deprecated void set_device_persistent_tensor_alloc_ids(int index, @Cast("google::protobuf::int64") long value);
    public native @Deprecated void add_device_persistent_tensor_alloc_ids(@Cast("google::protobuf::int64") long value);

    // int64 temp_memory_size = 1;
    public native void clear_temp_memory_size();
    @MemberGetter public static native int kTempMemorySizeFieldNumber();
    public static final int kTempMemorySizeFieldNumber = kTempMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") long temp_memory_size();
    public native void set_temp_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 device_temp_memory_size = 2 [deprecated = true];
    public native @Deprecated void clear_device_temp_memory_size();
    @MemberGetter public static native @Deprecated int kDeviceTempMemorySizeFieldNumber();
    public static final int kDeviceTempMemorySizeFieldNumber = kDeviceTempMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") @Deprecated long device_temp_memory_size();
    public native @Deprecated void set_device_temp_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 persistent_memory_size = 3;
    public native void clear_persistent_memory_size();
    @MemberGetter public static native int kPersistentMemorySizeFieldNumber();
    public static final int kPersistentMemorySizeFieldNumber = kPersistentMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") long persistent_memory_size();
    public native void set_persistent_memory_size(@Cast("google::protobuf::int64") long value);

    // int64 device_persistent_memory_size = 4 [deprecated = true];
    public native @Deprecated void clear_device_persistent_memory_size();
    @MemberGetter public static native @Deprecated int kDevicePersistentMemorySizeFieldNumber();
    public static final int kDevicePersistentMemorySizeFieldNumber = kDevicePersistentMemorySizeFieldNumber();
    public native @Cast("google::protobuf::int64") @Deprecated long device_persistent_memory_size();
    public native @Deprecated void set_device_persistent_memory_size(@Cast("google::protobuf::int64") long value);
}
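// [Editor's note] Usage sketch, not part of the generated bindings: reading the
// non-deprecated MemoryStats fields, including the repeated alloc-id list.
public static void printMemoryStats(MemoryStats stats) {
    System.out.println("temp_memory_size=" + stats.temp_memory_size());
    System.out.println("persistent_memory_size=" + stats.persistent_memory_size());
    for (int i = 0; i < stats.persistent_tensor_alloc_ids_size(); i++) {
        System.out.println("persistent_tensor_alloc_id=" + stats.persistent_tensor_alloc_ids(i));
    }
}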
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class NodeExecStats extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeExecStats(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public NodeExecStats(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public NodeExecStats position(long position) { return (NodeExecStats)super.position(position); }
    public NodeExecStats() { super((Pointer)null); allocate(); }
    private native void allocate();
    public NodeExecStats(@Const @ByRef NodeExecStats from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef NodeExecStats from);

    public native @ByRef @Name("operator =") NodeExecStats put(@Const @ByRef NodeExecStats from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef NodeExecStats default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const NodeExecStats internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(NodeExecStats other);
    public native void Swap(NodeExecStats other);

    // implements Message ----------------------------------------------

    public native NodeExecStats New();
    public native NodeExecStats New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef NodeExecStats from);
    public native void MergeFrom(@Const @ByRef NodeExecStats from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
    public native int memory_size();
    public native void clear_memory();
    @MemberGetter public static native int kMemoryFieldNumber();
    public static final int kMemoryFieldNumber = kMemoryFieldNumber();
    public native AllocatorMemoryUsed mutable_memory(int index);
    public native @Const @ByRef AllocatorMemoryUsed memory(int index);
    public native AllocatorMemoryUsed add_memory();

    // repeated .tensorflow.NodeOutput output = 7;
    public native int output_size();
    public native void clear_output();
    @MemberGetter public static native int kOutputFieldNumber();
    public static final int kOutputFieldNumber = kOutputFieldNumber();
    public native NodeOutput mutable_output(int index);
    public native @Const @ByRef NodeOutput output(int index);
    public native NodeOutput add_output();

    // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
    public native int referenced_tensor_size();
    public native void clear_referenced_tensor();
    @MemberGetter public static native int kReferencedTensorFieldNumber();
    public static final int kReferencedTensorFieldNumber = kReferencedTensorFieldNumber();
    public native AllocationDescription mutable_referenced_tensor(int index);
    public native @Const @ByRef AllocationDescription referenced_tensor(int index);
    public native AllocationDescription add_referenced_tensor();

    // string node_name = 1;
    public native void clear_node_name();
    @MemberGetter public static native int kNodeNameFieldNumber();
    public static final int kNodeNameFieldNumber = kNodeNameFieldNumber();
    public native @StdString BytePointer node_name();
    public native void set_node_name(@StdString BytePointer value);
    public native void set_node_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_node_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_node_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_node_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_node_name();
    public native void set_allocated_node_name(@StdString @Cast({"char*", "std::string*"}) BytePointer node_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_node_name();
    public native @Deprecated void unsafe_arena_set_allocated_node_name(
          @StdString @Cast({"char*", "std::string*"}) BytePointer node_name);

    // string timeline_label = 8;
    public native void clear_timeline_label();
    @MemberGetter public static native int kTimelineLabelFieldNumber();
    public static final int kTimelineLabelFieldNumber = kTimelineLabelFieldNumber();
    public native @StdString BytePointer timeline_label();
    public native void set_timeline_label(@StdString BytePointer value);
    public native void set_timeline_label(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_timeline_label(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_timeline_label(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_timeline_label();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_timeline_label();
    public native void set_allocated_timeline_label(@StdString @Cast({"char*", "std::string*"}) BytePointer timeline_label);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_timeline_label();
    public native @Deprecated void unsafe_arena_set_allocated_timeline_label(
          @StdString @Cast({"char*", "std::string*"}) BytePointer timeline_label);

    // .tensorflow.MemoryStats memory_stats = 12;
    public native @Cast("bool") boolean has_memory_stats();
    public native void clear_memory_stats();
    @MemberGetter public static native int kMemoryStatsFieldNumber();
    public static final int kMemoryStatsFieldNumber = kMemoryStatsFieldNumber();
    public native @Const @ByRef MemoryStats memory_stats();
    public native MemoryStats release_memory_stats();
    public native MemoryStats mutable_memory_stats();
    public native void set_allocated_memory_stats(MemoryStats memory_stats);
    public native void unsafe_arena_set_allocated_memory_stats(
          MemoryStats memory_stats);
    public native MemoryStats unsafe_arena_release_memory_stats();

    // int64 all_start_micros = 2;
    public native void clear_all_start_micros();
    @MemberGetter public static native int kAllStartMicrosFieldNumber();
    public static final int kAllStartMicrosFieldNumber = kAllStartMicrosFieldNumber();
    public native @Cast("google::protobuf::int64") long all_start_micros();
    public native void set_all_start_micros(@Cast("google::protobuf::int64") long value);

    // int64 op_start_rel_micros = 3;
    public native void clear_op_start_rel_micros();
    @MemberGetter public static native int kOpStartRelMicrosFieldNumber();
    public static final int kOpStartRelMicrosFieldNumber = kOpStartRelMicrosFieldNumber();
    public native @Cast("google::protobuf::int64") long op_start_rel_micros();
    public native void set_op_start_rel_micros(@Cast("google::protobuf::int64") long value);

    // int64 op_end_rel_micros = 4;
    public native void clear_op_end_rel_micros();
    @MemberGetter public static native int kOpEndRelMicrosFieldNumber();
    public static final int kOpEndRelMicrosFieldNumber = kOpEndRelMicrosFieldNumber();
    public native @Cast("google::protobuf::int64") long op_end_rel_micros();
    public native void set_op_end_rel_micros(@Cast("google::protobuf::int64") long value);

    // int64 all_end_rel_micros = 5;
    public native void clear_all_end_rel_micros();
    @MemberGetter public static native int kAllEndRelMicrosFieldNumber();
    public static final int kAllEndRelMicrosFieldNumber = kAllEndRelMicrosFieldNumber();
    public native @Cast("google::protobuf::int64") long all_end_rel_micros();
    public native void set_all_end_rel_micros(@Cast("google::protobuf::int64") long value);

    // int64 scheduled_micros = 9;
    public native void clear_scheduled_micros();
    @MemberGetter public static native int kScheduledMicrosFieldNumber();
    public static final int kScheduledMicrosFieldNumber = kScheduledMicrosFieldNumber();
    public native @Cast("google::protobuf::int64") long scheduled_micros();
    public native void set_scheduled_micros(@Cast("google::protobuf::int64") long value);

    // int64 all_start_nanos = 13;
    public native void clear_all_start_nanos();
    @MemberGetter public static native int kAllStartNanosFieldNumber();
    public static final int kAllStartNanosFieldNumber = kAllStartNanosFieldNumber();
    public native @Cast("google::protobuf::int64") long all_start_nanos();
    public native void set_all_start_nanos(@Cast("google::protobuf::int64") long value);

    // int64 op_start_rel_nanos = 14;
    public native void clear_op_start_rel_nanos();
    @MemberGetter public static native int kOpStartRelNanosFieldNumber();
    public static final int kOpStartRelNanosFieldNumber = kOpStartRelNanosFieldNumber();
    public native @Cast("google::protobuf::int64") long op_start_rel_nanos();
    public native void set_op_start_rel_nanos(@Cast("google::protobuf::int64") long value);

    // int64 op_end_rel_nanos = 15;
    public native void clear_op_end_rel_nanos();
    @MemberGetter public static native int kOpEndRelNanosFieldNumber();
    public static final int kOpEndRelNanosFieldNumber = kOpEndRelNanosFieldNumber();
    public native @Cast("google::protobuf::int64") long op_end_rel_nanos();
    public native void set_op_end_rel_nanos(@Cast("google::protobuf::int64") long value);

    // int64 all_end_rel_nanos = 16;
    public native void clear_all_end_rel_nanos();
    @MemberGetter public static native int kAllEndRelNanosFieldNumber();
    public static final int kAllEndRelNanosFieldNumber = kAllEndRelNanosFieldNumber();
    public native @Cast("google::protobuf::int64") long all_end_rel_nanos();
    public native void set_all_end_rel_nanos(@Cast("google::protobuf::int64") long value);

    // int64 scheduled_nanos = 17;
    public native void clear_scheduled_nanos();
    @MemberGetter public static native int kScheduledNanosFieldNumber();
    public static final int kScheduledNanosFieldNumber = kScheduledNanosFieldNumber();
    public native @Cast("google::protobuf::int64") long scheduled_nanos();
    public native void set_scheduled_nanos(@Cast("google::protobuf::int64") long value);

    // uint32 thread_id = 10;
    public native void clear_thread_id();
    @MemberGetter public static native int kThreadIdFieldNumber();
    public static final int kThreadIdFieldNumber = kThreadIdFieldNumber();
    public native @Cast("google::protobuf::uint32") int thread_id();
    public native void set_thread_id(@Cast("google::protobuf::uint32") int value);
}
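// [Editor's note] Usage sketch, not part of the generated bindings: deriving
// timings from NodeExecStats. The op_*_rel_micros fields are offsets relative
// to all_start_micros, so the op's absolute start time is the sum of the two.
public static void printNodeTiming(NodeExecStats stats) {
    long opStartMicros = stats.all_start_micros() + stats.op_start_rel_micros();
    long opDurationMicros = stats.op_end_rel_micros() - stats.op_start_rel_micros();
    System.out.println(stats.node_name().getString()
            + " start=" + opStartMicros + "us"
            + " op_duration=" + opDurationMicros + "us"
            + " total_duration=" + stats.all_end_rel_micros() + "us"
            + " thread_id=" + stats.thread_id());
}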
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class DeviceStepStats extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceStepStats(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public DeviceStepStats(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public DeviceStepStats position(long position) { return (DeviceStepStats)super.position(position); }
    public DeviceStepStats() { super((Pointer)null); allocate(); }
    private native void allocate();
    public DeviceStepStats(@Const @ByRef DeviceStepStats from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef DeviceStepStats from);

    public native @ByRef @Name("operator =") DeviceStepStats put(@Const @ByRef DeviceStepStats from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef DeviceStepStats default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const DeviceStepStats internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(DeviceStepStats other);
    public native void Swap(DeviceStepStats other);

    // implements Message ----------------------------------------------

    public native DeviceStepStats New();
    public native DeviceStepStats New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef DeviceStepStats from);
    public native void MergeFrom(@Const @ByRef DeviceStepStats from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.NodeExecStats node_stats = 2;
    public native int node_stats_size();
    public native void clear_node_stats();
    @MemberGetter public static native int kNodeStatsFieldNumber();
    public static final int kNodeStatsFieldNumber = kNodeStatsFieldNumber();
    public native NodeExecStats mutable_node_stats(int index);
    public native @Const @ByRef NodeExecStats node_stats(int index);
    public native NodeExecStats add_node_stats();

    // string device = 1;
    public native void clear_device();
    @MemberGetter public static native int kDeviceFieldNumber();
    public static final int kDeviceFieldNumber = kDeviceFieldNumber();
    public native @StdString BytePointer device();
    public native void set_device(@StdString BytePointer value);
    public native void set_device(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_device(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_device(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device();
    public native void set_allocated_device(@StdString @Cast({"char*", "std::string*"}) BytePointer device);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_device();
    public native @Deprecated void unsafe_arena_set_allocated_device(
          @StdString @Cast({"char*", "std::string*"}) BytePointer device);
}
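// [Editor's note] Usage sketch, not part of the generated bindings: assembling
// a DeviceStepStats record by hand. The device and node names are illustrative.
public static DeviceStepStats exampleDeviceStepStats() {
    DeviceStepStats deviceStats = new DeviceStepStats();
    deviceStats.set_device("/device:CPU:0");
    NodeExecStats nodeStats = deviceStats.add_node_stats();  // mutable child
    nodeStats.set_node_name("MatMul");
    nodeStats.set_all_start_micros(0L);
    nodeStats.set_all_end_rel_micros(42L);
    return deviceStats;
}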
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class StepStats extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StepStats(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public StepStats(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public StepStats position(long position) { return (StepStats)super.position(position); }
    public StepStats() { super((Pointer)null); allocate(); }
    private native void allocate();
    public StepStats(@Const @ByRef StepStats from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef StepStats from);

    public native @ByRef @Name("operator =") StepStats put(@Const @ByRef StepStats from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef StepStats default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const StepStats internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(StepStats other);
    public native void Swap(StepStats other);

    // implements Message ----------------------------------------------

    public native StepStats New();
    public native StepStats New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef StepStats from);
    public native void MergeFrom(@Const @ByRef StepStats from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.DeviceStepStats dev_stats = 1;
    public native int dev_stats_size();
    public native void clear_dev_stats();
    @MemberGetter public static native int kDevStatsFieldNumber();
    public static final int kDevStatsFieldNumber = kDevStatsFieldNumber();
    public native DeviceStepStats mutable_dev_stats(int index);
    public native @Const @ByRef DeviceStepStats dev_stats(int index);
    public native DeviceStepStats add_dev_stats();
}

// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__

// AllocationRecord
// int64 alloc_micros = 1;
// int64 alloc_bytes = 2;

// -------------------------------------------------------------------

// AllocatorMemoryUsed
// string allocator_name = 1;
// #if LANG_CXX11
// #endif
// int64 total_bytes = 2;
// int64 peak_bytes = 3;
// int64 live_bytes = 4;
// repeated .tensorflow.AllocationRecord allocation_records = 6;
// int64 allocator_bytes_in_use = 5;

// -------------------------------------------------------------------

// NodeOutput
// int32 slot = 1;
// .tensorflow.TensorDescription tensor_description = 3;

// -------------------------------------------------------------------

// MemoryStats
// int64 temp_memory_size = 1;
// int64 persistent_memory_size = 3;
// repeated int64 persistent_tensor_alloc_ids = 5;
// int64 device_temp_memory_size = 2 [deprecated = true];
// int64 device_persistent_memory_size = 4 [deprecated = true];
// repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];

// -------------------------------------------------------------------

// NodeExecStats
// string node_name = 1;
// #if LANG_CXX11
// #endif
// int64 all_start_micros = 2;
// int64 op_start_rel_micros = 3;
// int64 op_end_rel_micros = 4;
// int64 all_end_rel_micros = 5;
// repeated .tensorflow.AllocatorMemoryUsed memory = 6;
// repeated .tensorflow.NodeOutput output = 7;
// string timeline_label = 8;
// #if LANG_CXX11
// #endif
// int64 scheduled_micros = 9;
// uint32 thread_id = 10;
// repeated .tensorflow.AllocationDescription referenced_tensor = 11;
// .tensorflow.MemoryStats memory_stats = 12;
// int64 all_start_nanos = 13;
// int64 op_start_rel_nanos = 14;
// int64 op_end_rel_nanos = 15;
// int64 all_end_rel_nanos = 16;
// int64 scheduled_nanos = 17;

// -------------------------------------------------------------------

// DeviceStepStats
// string device = 1;
// #if LANG_CXX11
// #endif
// repeated .tensorflow.NodeExecStats node_stats = 2;

// -------------------------------------------------------------------

// StepStats
// repeated .tensorflow.DeviceStepStats dev_stats = 1;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif  // __GNUC__

// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow

// @@protoc_insertion_point(global_scope)

// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto
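// [Editor's note] Usage sketch, not part of the generated bindings: the typical
// read path through a StepStats tree (for example, one taken from the run
// metadata of a traced session run): per device, then per executed node.
public static void printStepStats(StepStats stepStats) {
    for (int d = 0; d < stepStats.dev_stats_size(); d++) {
        DeviceStepStats deviceStats = stepStats.dev_stats(d);
        System.out.println("device: " + deviceStats.device().getString());
        for (int n = 0; n < deviceStats.node_stats_size(); n++) {
            NodeExecStats nodeStats = deviceStats.node_stats(n);
            System.out.println("  " + nodeStats.node_name().getString()
                    + " " + nodeStats.all_end_rel_micros() + "us");
        }
    }
}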
// Parsed from tensorflow/core/framework/versions.pb.h

// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/versions.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fversions_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fversions_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include // IWYU pragma: export
// #include // IWYU pragma: export
// #include
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fversions_2eproto

// Internal implementation detail -- do not use these members.
// namespace protobuf_tensorflow_2fcore_2fframework_2fversions_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class VersionDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public VersionDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public VersionDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public VersionDef position(long position) { return (VersionDef)super.position(position); }
    public VersionDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public VersionDef(@Const @ByRef VersionDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef VersionDef from);

    public native @ByRef @Name("operator =") VersionDef put(@Const @ByRef VersionDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef VersionDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const VersionDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(VersionDef other);
    public native void Swap(VersionDef other);

    // implements Message ----------------------------------------------

    public native VersionDef New();
    public native VersionDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef VersionDef from);
    public native void MergeFrom(@Const @ByRef VersionDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
          CodedInputStream input);
    public native void SerializeWithCachedSizes(
          CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
          @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated int32 bad_consumers = 3;
    public native int bad_consumers_size();
    public native void clear_bad_consumers();
    @MemberGetter public static native int kBadConsumersFieldNumber();
    public static final int kBadConsumersFieldNumber = kBadConsumersFieldNumber();
    public native @Cast("google::protobuf::int32") int bad_consumers(int index);
    public native void set_bad_consumers(int index, @Cast("google::protobuf::int32") int value);
    public native void add_bad_consumers(@Cast("google::protobuf::int32") int value);

    // int32 producer = 1;
    public native void clear_producer();
    @MemberGetter public static native int kProducerFieldNumber();
    public static final int kProducerFieldNumber = kProducerFieldNumber();
    public native @Cast("google::protobuf::int32") int producer();
    public native void set_producer(@Cast("google::protobuf::int32") int value);

    // int32 min_consumer = 2;
    public native void clear_min_consumer();
    @MemberGetter public static native int kMinConsumerFieldNumber();
    public static final int kMinConsumerFieldNumber = kMinConsumerFieldNumber();
    public native @Cast("google::protobuf::int32") int min_consumer();
    public native void set_min_consumer(@Cast("google::protobuf::int32") int value);
}

// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__

// VersionDef
// int32 producer = 1;
// int32 min_consumer = 2;
// repeated int32 bad_consumers = 3;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif  // __GNUC__

// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow

// @@protoc_insertion_point(global_scope)

// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fversions_2eproto
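// [Editor's note] Usage sketch, not part of the generated bindings: a
// simplified form of the compatibility rule that VersionDef encodes. Data
// carrying this VersionDef can be read by a consumer whose version is at least
// min_consumer and is not listed in bad_consumers (the full TensorFlow rule
// also checks the producer side).
public static boolean consumerIsCompatible(VersionDef versions, int consumerVersion) {
    if (consumerVersion < versions.min_consumer()) return false;
    for (int i = 0; i < versions.bad_consumers_size(); i++) {
        if (versions.bad_consumers(i) == consumerVersion) return false;
    }
    return true;
}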
* * If the session disconnects from the remote process during its * lifetime, session calls may fail immediately. */ public native @StdString BytePointer target(); public native SessionOptions target(BytePointer target); /** Configuration options. */ public native @ByRef ConfigProto config(); public native SessionOptions config(ConfigProto config); public SessionOptions() { super((Pointer)null); allocate(); } private native void allocate(); } // end namespace tensorflow // #endif // TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_ // Parsed from tensorflow/core/lib/core/threadpool.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_LIB_CORE_THREADPOOL_H_ // #define TENSORFLOW_CORE_LIB_CORE_THREADPOOL_H_ // #include // #include // #include "tensorflow/core/platform/env.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/types.h" @Namespace("tensorflow::thread") @NoOffset public static class ThreadPool extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ThreadPool(Pointer p) { super(p); } // Constructs a pool that contains "num_threads" threads with specified // "name". env->StartThread() is used to create individual threads with the // given ThreadOptions. If "low_latency_hint" is true the thread pool // implementation may use it as a hint that lower latency is preferred at the // cost of higher CPU usage, e.g. by letting one or more idle threads spin // wait. Conversely, if the threadpool is used to schedule high-latency // operations like I/O the hint should be set to false. // // REQUIRES: num_threads > 0 public ThreadPool(Env env, @Const @ByRef ThreadOptions thread_options, @StdString BytePointer name, int num_threads, @Cast("bool") boolean low_latency_hint) { super((Pointer)null); allocate(env, thread_options, name, num_threads, low_latency_hint); } private native void allocate(Env env, @Const @ByRef ThreadOptions thread_options, @StdString BytePointer name, int num_threads, @Cast("bool") boolean low_latency_hint); public ThreadPool(Env env, @Const @ByRef ThreadOptions thread_options, @StdString String name, int num_threads, @Cast("bool") boolean low_latency_hint) { super((Pointer)null); allocate(env, thread_options, name, num_threads, low_latency_hint); } private native void allocate(Env env, @Const @ByRef ThreadOptions thread_options, @StdString String name, int num_threads, @Cast("bool") boolean low_latency_hint); // Constructs a pool for low-latency ops that contains "num_threads" threads // with specified "name". env->StartThread() is used to create individual // threads. 
// REQUIRES: num_threads > 0 public ThreadPool(Env env, @StdString BytePointer name, int num_threads) { super((Pointer)null); allocate(env, name, num_threads); } private native void allocate(Env env, @StdString BytePointer name, int num_threads); public ThreadPool(Env env, @StdString String name, int num_threads) { super((Pointer)null); allocate(env, name, num_threads); } private native void allocate(Env env, @StdString String name, int num_threads); // Constructs a pool for low-latency ops that contains "num_threads" threads // with specified "name". env->StartThread() is used to create individual // threads with the given ThreadOptions. // REQUIRES: num_threads > 0 public ThreadPool(Env env, @Const @ByRef ThreadOptions thread_options, @StdString BytePointer name, int num_threads) { super((Pointer)null); allocate(env, thread_options, name, num_threads); } private native void allocate(Env env, @Const @ByRef ThreadOptions thread_options, @StdString BytePointer name, int num_threads); public ThreadPool(Env env, @Const @ByRef ThreadOptions thread_options, @StdString String name, int num_threads) { super((Pointer)null); allocate(env, thread_options, name, num_threads); } private native void allocate(Env env, @Const @ByRef ThreadOptions thread_options, @StdString String name, int num_threads); // Waits until all scheduled work has finished and then destroys the // set of threads. // Schedules fn() for execution in the pool of threads. public native void Schedule(@ByVal Fn fn); // Requires 0 < block_size <= total. // Spawns k threads and calls fn(i*block_size, (i+1)*block_size) from the // ith thread (i>=0). When (i+1)*block_size > total, fn(i*block_size, total) // is called instead. k = NumShardsUsedByTransformRangeConcurrently(...). // Note that when there aren't enough threads in the pool to achieve full // parallelism, function calls will be automatically queued. public native void TransformRangeConcurrently(@Cast("const tensorflow::int64") long block_size, @Cast("const tensorflow::int64") long total, @Const @ByRef ForFn fn); // Returns the number of threads spawned by calling TransformRangeConcurrently // with these parameters. public native int NumShardsUsedByTransformRangeConcurrently(@Cast("const tensorflow::int64") long block_size, @Cast("const tensorflow::int64") long total); // ParallelFor shards the "total" units of work, assuming each unit of work // has roughly "cost_per_unit" cost, in cycles. Each unit of work is // indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work // and the total cost of each shard is roughly the same. // // "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds // if not CPU-bound) to complete a unit of work. Overestimating creates too // many shards and CPU time will be dominated by per-shard overhead, such as // Context creation. Underestimating may not fully make use of the specified // parallelism. public native void ParallelFor(@Cast("tensorflow::int64") long total, @Cast("tensorflow::int64") long cost_per_unit, @ByVal ForFn fn); // Shards the "total" units of work. For more details, see "ParallelFor". // // The function is passed a thread_id between 0 and NumThreads() *inclusive*. // This is because some work can happen on the caller thread while the threads // in the pool are also being used. // // The caller can allocate NumThreads() + 1 separate buffers for each thread. // Each thread can safely write to the buffer given by its id without // synchronization.
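// A hedged sketch of using ParallelForWithWorkerId with such per-id buffers;
// Env.Default() and a call(start, limit, id) override point on ParallelForFn
// are assumptions about the surrounding bindings, not declarations made here:
//
//   ThreadPool pool = new ThreadPool(Env.Default(), "shards", 4);
//   long total = 1 << 16, costPerUnit = 100;
//   float[][] scratch = new float[pool.NumThreads() + 1][1024];
//   pool.ParallelForWithWorkerId(total, costPerUnit, new ParallelForFn() {
//     public void call(long start, long limit, int id) {
//       for (long i = start; i < limit; i++) scratch[id][0] += i; // id-private buffer
//     }
//   });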
However, the worker fn may be called multiple times // sequentially with the same id. // // At most NumThreads() unique ids will actually be used, and only a few may // be used for small workloads. If each buffer is expensive, the buffers // should be stored in an array initially filled with null, and a buffer // should be allocated by fn the first time that the id is used. public native void ParallelForWithWorkerId( @Cast("tensorflow::int64") long total, @Cast("tensorflow::int64") long cost_per_unit, @Const @ByRef ParallelForFn fn); // Returns the number of threads in the pool. public native int NumThreads(); // Returns current thread id between 0 and NumThreads() - 1, if called from a // thread in the pool. Returns -1 otherwise. public native int CurrentThreadId(); @Opaque public static class Impl extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public Impl() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Impl(Pointer p) { super(p); } } } // namespace thread // namespace tensorflow // #endif // TENSORFLOW_CORE_LIB_CORE_THREADPOOL_H_ // Parsed from tensorflow/core/framework/allocation_description.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/allocation_description.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class AllocationDescription extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AllocationDescription(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
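 * <p>
 * A hedged sketch of the accessors declared below; the field values are
 * illustrative only:
 * <pre>{@code
 * AllocationDescription d = new AllocationDescription();
 * d.set_requested_bytes(1024);
 * d.set_allocator_name("cpu");
 * long requested = d.requested_bytes(); // 1024
 * }</pre>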
*/ public AllocationDescription(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AllocationDescription position(long position) { return (AllocationDescription)super.position(position); } public AllocationDescription() { super((Pointer)null); allocate(); } private native void allocate(); public AllocationDescription(@Const @ByRef AllocationDescription from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef AllocationDescription from); public native @ByRef @Name("operator =") AllocationDescription put(@Const @ByRef AllocationDescription from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef AllocationDescription default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const AllocationDescription internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(AllocationDescription other); public native void Swap(AllocationDescription other); // implements Message ---------------------------------------------- public native AllocationDescription New(); public native AllocationDescription New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef AllocationDescription from); public native void MergeFrom(@Const @ByRef AllocationDescription from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string allocator_name = 3; public native void clear_allocator_name(); @MemberGetter public static native int kAllocatorNameFieldNumber(); public static final int kAllocatorNameFieldNumber = kAllocatorNameFieldNumber(); public native @StdString BytePointer allocator_name(); public native void set_allocator_name(@StdString BytePointer value); public native void set_allocator_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_allocator_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_allocator_name(String value, 
@Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_allocator_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_allocator_name(); public native void set_allocated_allocator_name(@StdString @Cast({"char*", "std::string*"}) BytePointer allocator_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_allocator_name(); public native @Deprecated void unsafe_arena_set_allocated_allocator_name( @StdString @Cast({"char*", "std::string*"}) BytePointer allocator_name); // int64 requested_bytes = 1; public native void clear_requested_bytes(); @MemberGetter public static native int kRequestedBytesFieldNumber(); public static final int kRequestedBytesFieldNumber = kRequestedBytesFieldNumber(); public native @Cast("google::protobuf::int64") long requested_bytes(); public native void set_requested_bytes(@Cast("google::protobuf::int64") long value); // int64 allocated_bytes = 2; public native void clear_allocated_bytes(); @MemberGetter public static native int kAllocatedBytesFieldNumber(); public static final int kAllocatedBytesFieldNumber = kAllocatedBytesFieldNumber(); public native @Cast("google::protobuf::int64") long allocated_bytes(); public native void set_allocated_bytes(@Cast("google::protobuf::int64") long value); // int64 allocation_id = 4; public native void clear_allocation_id(); @MemberGetter public static native int kAllocationIdFieldNumber(); public static final int kAllocationIdFieldNumber = kAllocationIdFieldNumber(); public native @Cast("google::protobuf::int64") long allocation_id(); public native void set_allocation_id(@Cast("google::protobuf::int64") long value); // uint64 ptr = 6; public native void clear_ptr(); @MemberGetter public static native int kPtrFieldNumber(); public static final int kPtrFieldNumber = kPtrFieldNumber(); public native @Cast("google::protobuf::uint64") long ptr(); public native void set_ptr(@Cast("google::protobuf::uint64") long value); // bool has_single_reference = 5; public native void clear_has_single_reference(); @MemberGetter public static native int kHasSingleReferenceFieldNumber(); public static final int kHasSingleReferenceFieldNumber = kHasSingleReferenceFieldNumber(); public native @Cast("bool") boolean has_single_reference(); public native void set_has_single_reference(@Cast("bool") boolean value); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // AllocationDescription // int64 requested_bytes = 1; // int64 allocated_bytes = 2; // string allocator_name = 3; // #if LANG_CXX11 // #endif // int64 allocation_id = 4; // bool has_single_reference = 5; // uint64 ptr = 6; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto // Parsed from tensorflow/core/framework/allocator.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_ALLOCATOR_H_ // #define TENSORFLOW_CORE_FRAMEWORK_ALLOCATOR_H_ // #include // #include // #include "tensorflow/core/framework/numeric_types.h" // #include "tensorflow/core/framework/resource_handle.h" // #include "tensorflow/core/framework/type_traits.h" // #include "tensorflow/core/platform/logging.h" // #include "tensorflow/core/platform/mutex.h" // #include "tensorflow/core/platform/types.h" @Namespace("tensorflow") @Opaque public static class Variant extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public Variant() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Variant(Pointer p) { super(p); } } // Attributes for a single allocation call. Different calls to the same // allocator could potentially have different allocation attributes. @Namespace("tensorflow") public static class AllocationAttributes extends Pointer { static { Loader.load(); } /** Default native constructor. */ public AllocationAttributes() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public AllocationAttributes(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AllocationAttributes(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public AllocationAttributes position(long position) { return (AllocationAttributes)super.position(position); } // If the first attempt to allocate the memory fails, the allocation // should return immediately without retrying. // An example use case is optional scratch spaces where a failure // has only performance impact. public native @Cast("bool") boolean no_retry_on_failure(); public native AllocationAttributes no_retry_on_failure(boolean no_retry_on_failure); // If a Tensor is allocated without the following set to true, then // it is logged as an unknown allocation. During execution Tensors // should be allocated through the OpKernelContext which records // which Op is performing the allocation, and sets this flag to // true. public native @Cast("bool") boolean allocation_will_be_logged(); public native AllocationAttributes allocation_will_be_logged(boolean allocation_will_be_logged); } // Runtime statistics collected by an allocator. @Namespace("tensorflow") @NoOffset public static class AllocatorStats extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AllocatorStats(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
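 * <p>
 * A hedged sketch of collecting these stats; GetStats and cpu_allocator are
 * declared further below in this file:
 * <pre>{@code
 * AllocatorStats stats = new AllocatorStats();
 * cpu_allocator().GetStats(stats);
 * System.out.println(stats.DebugString().getString());
 * }</pre>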
*/ public AllocatorStats(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AllocatorStats position(long position) { return (AllocatorStats)super.position(position); } public native @Cast("tensorflow::int64") long num_allocs(); public native AllocatorStats num_allocs(long num_allocs); // Number of allocations. public native @Cast("tensorflow::int64") long bytes_in_use(); public native AllocatorStats bytes_in_use(long bytes_in_use); // Number of bytes in use. public native @Cast("tensorflow::int64") long max_bytes_in_use(); public native AllocatorStats max_bytes_in_use(long max_bytes_in_use); // The maximum bytes in use. public native @Cast("tensorflow::int64") long max_alloc_size(); public native AllocatorStats max_alloc_size(long max_alloc_size); // The max single allocation seen. // The upper limit on what the allocator can allocate, if such a limit // is known. Certain allocators may return 0 to indicate the limit is // unknown. public native @Cast("tensorflow::int64") long bytes_limit(); public native AllocatorStats bytes_limit(long bytes_limit); public AllocatorStats() { super((Pointer)null); allocate(); } private native void allocate(); public native void Clear(); public native @StdString BytePointer DebugString(); } // Allocator is an abstract interface for allocating and deallocating // device memory. @Namespace("tensorflow") public static class Allocator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Allocator(Pointer p) { super(p); } // Align to 64 byte boundary. @MemberGetter public static native @Cast("const size_t") long kAllocatorAlignment(); public static final long kAllocatorAlignment = kAllocatorAlignment(); // Return a string identifying this allocator. public native @StdString BytePointer Name(); // Return an uninitialized block of memory that is "num_bytes" bytes // in size. The returned pointer is guaranteed to be aligned to a // multiple of "alignment" bytes. // REQUIRES: "alignment" is a power of 2. public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes); // Return an uninitialized block of memory that is "num_bytes" bytes // in size with specified allocation attributes. The returned pointer is // guaranteed to be aligned to a multiple of "alignment" bytes. // REQUIRES: "alignment" is a power of 2. public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes, @Const @ByRef AllocationAttributes allocation_attr); // Deallocate a block of memory pointed to by "ptr". // REQUIRES: "ptr" was previously returned by a call to AllocateRaw public native void DeallocateRaw(Pointer ptr); // Convenience functions to do typed allocation. C++ constructors // and destructors are invoked for complex types if necessary, // depending on the concrete Allocator implementation. May return // NULL if the tensor has too many elements to represent in a single // allocation. // Returns true if this allocator tracks the sizes of allocations. // RequestedSize and AllocatedSize must be overridden if // TracksAllocationSizes is overridden to return true. public native @Cast("bool") boolean TracksAllocationSizes(); // Returns true if this allocator requires tensors with 0 elements // to allocate buffers. This is false for most allocators, but may // be used by special-case allocators that want to track tensor // usage.
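// A hedged sketch of the raw-allocation contract above: 64 satisfies the
// power-of-2 alignment requirement, and the no-retry attribute comes from
// AllocationAttributes declared earlier in this header:
//
//   Allocator a = cpu_allocator(); // declared further below
//   AllocationAttributes attrs = new AllocationAttributes();
//   attrs.no_retry_on_failure(true); // optional scratch space, failure is cheap
//   Pointer buf = a.AllocateRaw(64, 4096, attrs);
//   if (buf != null && !buf.isNull()) { /* ... use buf ... */ a.DeallocateRaw(buf); }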
public native @Cast("bool") boolean ShouldAllocateEmptyTensors(); // Returns the user-requested size of the data allocated at // 'ptr'. Note that the actual buffer allocated might be larger // than requested, but this function returns the size requested by // the user. // // REQUIRES: TracksAllocationSizes() is true. // // REQUIRES: 'ptr!=nullptr' and points to a buffer previously // allocated by this allocator. public native @Cast("size_t") long RequestedSize(@Const Pointer ptr); // Returns the allocated size of the buffer at 'ptr' if known, // otherwise returns RequestedSize(ptr). AllocatedSize(ptr) is // guaranteed to be >= RequestedSize(ptr). // // REQUIRES: TracksAllocationSizes() is true. // // REQUIRES: 'ptr!=nullptr' and points to a buffer previously // allocated by this allocator. public native @Cast("size_t") long AllocatedSize(@Const Pointer ptr); // Returns either 0 or an identifier assigned to the buffer at 'ptr' // when the buffer was returned by AllocateRaw. If non-zero, the // identifier differs from every other ID assigned by this // allocator. // // REQUIRES: TracksAllocationSizes() is true. // // REQUIRES: 'ptr!=nullptr' and points to a buffer previously // allocated by this allocator. public native @Cast("tensorflow::int64") long AllocationId(@Const Pointer ptr); // Returns the allocated size of the buffer at 'ptr' if known, // otherwise returns 0. This method can be called when // TracksAllocationSizes() is false, but can be extremely slow. // // REQUIRES: 'ptr!=nullptr' and points to a buffer previously // allocated by this allocator. public native @Cast("size_t") long AllocatedSizeSlow(@Const Pointer ptr); // Fills in 'stats' with statistics collected by this allocator. public native void GetStats(AllocatorStats stats); // Clears the internal stats except for the `in_use` field. public native void ClearStats(); } // Allocator-specific constructors and destructors are used for // strings // An implementation of Allocator that delegates all calls to another Allocator. // // Useful to clients who want to override part of the functionality of another // allocator. @Namespace("tensorflow") @NoOffset public static class AllocatorWrapper extends Allocator { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AllocatorWrapper(Pointer p) { super(p); } public AllocatorWrapper(Allocator wrapped) { super((Pointer)null); allocate(wrapped); } private native void allocate(Allocator wrapped); // Returns the wrapped allocator to which all calls are delegated. public native Allocator wrapped(); public native @StdString BytePointer Name(); public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes); public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes, @Const @ByRef AllocationAttributes allocation_attr); public native void DeallocateRaw(Pointer ptr); public native @Cast("bool") boolean TracksAllocationSizes(); public native @Cast("bool") boolean ShouldAllocateEmptyTensors(); public native @Cast("size_t") long RequestedSize(@Const Pointer ptr); public native @Cast("size_t") long AllocatedSize(@Const Pointer ptr); public native @Cast("tensorflow::int64") long AllocationId(@Const Pointer ptr); public native @Cast("size_t") long AllocatedSizeSlow(@Const Pointer ptr); } // A tensorflow Op may need access to different kinds of memory that // are not simply a function of the device to which the Op has been // assigned. 
For example, an Op executing on a GPU may still need // to allocate CPU RAM for some purpose. Internal to the tensorflow // runtime we may choose to allocate CPU RAM from special regions // that have been prepared for higher performance in some use // contexts, e.g. doing DMA with particular devices. For these // reasons, the Device interface does not expose just one memory // Allocator, but instead provides an accessor that takes a // specification of the desired memory attributes in order to select // an Allocator. // // Example use: // // Allocator for ordinary device memory: // Allocator* a = allocator(AllocatorAttributes()); // ... // // Allocator for CPU RAM, regardless of where Op is executing: // AllocatorAttributes attr; // attr.set_on_host(true); // Allocator* a = allocator(attr); @Namespace("tensorflow") public static class AllocatorAttributes extends Pointer { static { Loader.load(); } /** Default native constructor. */ public AllocatorAttributes() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public AllocatorAttributes(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AllocatorAttributes(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public AllocatorAttributes position(long position) { return (AllocatorAttributes)super.position(position); } public native void set_on_host(@Cast("bool") boolean v); public native @Cast("bool") boolean on_host(); public native void set_nic_compatible(@Cast("bool") boolean v); public native @Cast("bool") boolean nic_compatible(); public native void set_gpu_compatible(@Cast("bool") boolean v); public native @Cast("bool") boolean gpu_compatible(); public native void Merge(@ByVal AllocatorAttributes other); // Returns true if the fields set in *this are a subset of or equal to // those set in other. public native @Cast("bool") boolean IsEqualOrLessRestrictiveThan(@Const @ByRef AllocatorAttributes other); // NOTE: The upper 8 bits of the value are reserved for // device-specific uses. Implementors of a device can interpret these // upper 8 bits in device-specific ways, and ops implemented for those // devices are responsible for setting those 8 bits appropriately. public native int value(); public native AllocatorAttributes value(int value); // EXPERIMENTAL: If this is greater than zero, then allocation is delegated to // a named special-purpose allocator on the same device. public native int scope_id(); public native AllocatorAttributes scope_id(int scope_id); } // Returns a trivial implementation of Allocator, which is a process singleton. // Access through this function is only intended for use in tests and auxiliary // processing. Performance-sensitive uses should always obtain allocators from // ProcessState. @Namespace("tensorflow") public static native Allocator cpu_allocator(); // If 'enable' is true, the default CPU allocator implementation will collect // AllocatorStats. By default, it's disabled. @Namespace("tensorflow") public static native void EnableCPUAllocatorStats(@Cast("bool") boolean enable); // If 'enable' is true, the default CPU allocator implementation will collect // full statistics. By default, it's disabled. @Namespace("tensorflow") public static native void EnableCPUAllocatorFullStats(@Cast("bool") boolean enable); // An object that does the underlying suballoc/free of memory for a higher-level // allocator.
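// A hedged Java rendering of the C++ "Example use" comment above; the
// allocator(attr) accessor lives on Device and is not declared here, so the
// sketch stops at building the attributes and the CPU singleton:
//
//   AllocatorAttributes attr = new AllocatorAttributes();
//   attr.set_on_host(true);        // CPU RAM regardless of assigned device
//   EnableCPUAllocatorStats(true); // opt in to AllocatorStats collection
//   Allocator a = cpu_allocator(); // process-singleton CPU allocator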
The expectation is that the higher-level allocator is doing some // kind of cache or pool management so that it will call SubAllocator::Alloc and // Free relatively infrequently, compared to the number of times its own // AllocateRaw and Free methods are called. @Namespace("tensorflow") @NoOffset public static class SubAllocator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SubAllocator(Pointer p) { super(p); } // Visitor gets called with a pointer to a memory area and its // size in bytes. The index value will be numa_node for a CPU // allocator and GPU id for a GPU allocator. public native Pointer Alloc(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes); public native void Free(Pointer ptr, @Cast("size_t") long num_bytes); } // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_ALLOCATOR_H_ // Parsed from tensorflow/core/framework/tensor_shape.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/tensor_shape.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class TensorShapeProto_Dim extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorShapeProto_Dim(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
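 * <p>
 * A hedged sketch of how Dim entries are typically produced, via add_dim()
 * on the enclosing TensorShapeProto declared further below:
 * <pre>{@code
 * TensorShapeProto shape = new TensorShapeProto();
 * shape.add_dim().set_size(2);
 * shape.add_dim().set_size(3); // a 2 x 3 shape
 * }</pre>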
*/ public TensorShapeProto_Dim(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorShapeProto_Dim position(long position) { return (TensorShapeProto_Dim)super.position(position); } public TensorShapeProto_Dim() { super((Pointer)null); allocate(); } private native void allocate(); public TensorShapeProto_Dim(@Const @ByRef TensorShapeProto_Dim from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorShapeProto_Dim from); public native @ByRef @Name("operator =") TensorShapeProto_Dim put(@Const @ByRef TensorShapeProto_Dim from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorShapeProto_Dim default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorShapeProto_Dim internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorShapeProto_Dim other); public native void Swap(TensorShapeProto_Dim other); // implements Message ---------------------------------------------- public native TensorShapeProto_Dim New(); public native TensorShapeProto_Dim New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorShapeProto_Dim from); public native void MergeFrom(@Const @ByRef TensorShapeProto_Dim from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string name = 2; public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name(); public 
native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( @StdString @Cast({"char*", "std::string*"}) BytePointer name); // int64 size = 1; public native void clear_size(); @MemberGetter public static native int kSizeFieldNumber(); public static final int kSizeFieldNumber = kSizeFieldNumber(); public native @Cast("google::protobuf::int64") long size(); public native void set_size(@Cast("google::protobuf::int64") long value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class TensorShapeProto extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorShapeProto(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorShapeProto(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorShapeProto position(long position) { return (TensorShapeProto)super.position(position); } public TensorShapeProto() { super((Pointer)null); allocate(); } private native void allocate(); public TensorShapeProto(@Const @ByRef TensorShapeProto from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorShapeProto from); public native @ByRef @Name("operator =") TensorShapeProto put(@Const @ByRef TensorShapeProto from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorShapeProto default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorShapeProto internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorShapeProto other); public native void Swap(TensorShapeProto other); // implements Message ---------------------------------------------- public native TensorShapeProto New(); public native TensorShapeProto New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorShapeProto from); public native void MergeFrom(@Const @ByRef TensorShapeProto from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public 
native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.TensorShapeProto.Dim dim = 2; public native int dim_size(); public native void clear_dim(); @MemberGetter public static native int kDimFieldNumber(); public static final int kDimFieldNumber = kDimFieldNumber(); public native TensorShapeProto_Dim mutable_dim(int index); public native @Const @ByRef TensorShapeProto_Dim dim(int index); public native TensorShapeProto_Dim add_dim(); // bool unknown_rank = 3; public native void clear_unknown_rank(); @MemberGetter public static native int kUnknownRankFieldNumber(); public static final int kUnknownRankFieldNumber = kUnknownRankFieldNumber(); public native @Cast("bool") boolean unknown_rank(); public native void set_unknown_rank(@Cast("bool") boolean value); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // TensorShapeProto_Dim // int64 size = 1; // string name = 2; // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // TensorShapeProto // repeated .tensorflow.TensorShapeProto.Dim dim = 2; // bool unknown_rank = 3; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto // Parsed from tensorflow/core/framework/types.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/types.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftypes_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftypes_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2ftypes_2eproto // Internal implementation detail -- do not use these members. 
// namespace protobuf_tensorflow_2fcore_2fframework_2ftypes_2eproto // namespace tensorflow /** enum tensorflow::DataType */ public static final int DT_INVALID = 0, DT_FLOAT = 1, DT_DOUBLE = 2, DT_INT32 = 3, DT_UINT8 = 4, DT_INT16 = 5, DT_INT8 = 6, DT_STRING = 7, DT_COMPLEX64 = 8, DT_INT64 = 9, DT_BOOL = 10, DT_QINT8 = 11, DT_QUINT8 = 12, DT_QINT32 = 13, DT_BFLOAT16 = 14, DT_QINT16 = 15, DT_QUINT16 = 16, DT_UINT16 = 17, DT_COMPLEX128 = 18, DT_HALF = 19, DT_RESOURCE = 20, DT_VARIANT = 21, DT_UINT32 = 22, DT_UINT64 = 23, DT_FLOAT_REF = 101, DT_DOUBLE_REF = 102, DT_INT32_REF = 103, DT_UINT8_REF = 104, DT_INT16_REF = 105, DT_INT8_REF = 106, DT_STRING_REF = 107, DT_COMPLEX64_REF = 108, DT_INT64_REF = 109, DT_BOOL_REF = 110, DT_QINT8_REF = 111, DT_QUINT8_REF = 112, DT_QINT32_REF = 113, DT_BFLOAT16_REF = 114, DT_QINT16_REF = 115, DT_QUINT16_REF = 116, DT_UINT16_REF = 117, DT_COMPLEX128_REF = 118, DT_HALF_REF = 119, DT_RESOURCE_REF = 120, DT_VARIANT_REF = 121, DT_UINT32_REF = 122, DT_UINT64_REF = 123, DataType_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min, DataType_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max; @Namespace("tensorflow") public static native @Cast("bool") boolean DataType_IsValid(int value); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::DataType") int DataType_MIN(); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::DataType") int DataType_MAX(); @Namespace("tensorflow") @MemberGetter public static native int DataType_ARRAYSIZE(); @Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer DataType_descriptor(); @Namespace("tensorflow") public static native @StdString BytePointer DataType_Name(@Cast("tensorflow::DataType") int value); @Namespace("tensorflow") public static native @Cast("bool") boolean DataType_Parse( @StdString BytePointer name, @Cast("tensorflow::DataType*") IntPointer value); @Namespace("tensorflow") public static native @Cast("bool") boolean DataType_Parse( @StdString String name, @Cast("tensorflow::DataType*") IntPointer value); // =================================================================== // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // namespace protobuf // namespace google // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftypes_2eproto // Parsed from tensorflow/core/framework/resource_handle.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/resource_handle.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fresource_5fhandle_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fresource_5fhandle_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. 
Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fresource_5fhandle_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2fresource_5fhandle_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class ResourceHandleProto extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceHandleProto(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public ResourceHandleProto(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public ResourceHandleProto position(long position) { return (ResourceHandleProto)super.position(position); } public ResourceHandleProto() { super((Pointer)null); allocate(); } private native void allocate(); public ResourceHandleProto(@Const @ByRef ResourceHandleProto from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef ResourceHandleProto from); public native @ByRef @Name("operator =") ResourceHandleProto put(@Const @ByRef ResourceHandleProto from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef ResourceHandleProto default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const ResourceHandleProto internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(ResourceHandleProto other); public native void Swap(ResourceHandleProto other); // implements Message ---------------------------------------------- public native ResourceHandleProto New(); public native ResourceHandleProto New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef ResourceHandleProto from); public native void MergeFrom(@Const @ByRef ResourceHandleProto from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] 
InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string device = 1; public native void clear_device(); @MemberGetter public static native int kDeviceFieldNumber(); public static final int kDeviceFieldNumber = kDeviceFieldNumber(); public native @StdString BytePointer device(); public native void set_device(@StdString BytePointer value); public native void set_device(@StdString String value); // #if LANG_CXX11 // #endif public native void set_device(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_device(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device(); public native void set_allocated_device(@StdString @Cast({"char*", "std::string*"}) BytePointer device); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_device(); public native @Deprecated void unsafe_arena_set_allocated_device( @StdString @Cast({"char*", "std::string*"}) BytePointer device); // string container = 2; public native void clear_container(); @MemberGetter public static native int kContainerFieldNumber(); public static final int kContainerFieldNumber = kContainerFieldNumber(); public native @StdString BytePointer container(); public native void set_container(@StdString BytePointer value); public native void set_container(@StdString String value); // #if LANG_CXX11 // #endif public native void set_container(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_container(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_container(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_container(); public native void set_allocated_container(@StdString @Cast({"char*", "std::string*"}) BytePointer container); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_container(); public native @Deprecated void unsafe_arena_set_allocated_container( @StdString @Cast({"char*", "std::string*"}) BytePointer container); // string name = 3; public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( 
@StdString @Cast({"char*", "std::string*"}) BytePointer name); // string maybe_type_name = 5; public native void clear_maybe_type_name(); @MemberGetter public static native int kMaybeTypeNameFieldNumber(); public static final int kMaybeTypeNameFieldNumber = kMaybeTypeNameFieldNumber(); public native @StdString BytePointer maybe_type_name(); public native void set_maybe_type_name(@StdString BytePointer value); public native void set_maybe_type_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_maybe_type_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_maybe_type_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_maybe_type_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_maybe_type_name(); public native void set_allocated_maybe_type_name(@StdString @Cast({"char*", "std::string*"}) BytePointer maybe_type_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_maybe_type_name(); public native @Deprecated void unsafe_arena_set_allocated_maybe_type_name( @StdString @Cast({"char*", "std::string*"}) BytePointer maybe_type_name); // uint64 hash_code = 4; public native void clear_hash_code(); @MemberGetter public static native int kHashCodeFieldNumber(); public static final int kHashCodeFieldNumber = kHashCodeFieldNumber(); public native @Cast("google::protobuf::uint64") long hash_code(); public native void set_hash_code(@Cast("google::protobuf::uint64") long value); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // ResourceHandleProto // string device = 1; // #if LANG_CXX11 // #endif // string container = 2; // #if LANG_CXX11 // #endif // string name = 3; // #if LANG_CXX11 // #endif // uint64 hash_code = 4; // string maybe_type_name = 5; // #if LANG_CXX11 // #endif // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fresource_5fhandle_2eproto // Parsed from tensorflow/core/framework/tensor.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/tensor.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. 
// #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include "tensorflow/core/framework/resource_handle.pb.h" // #include "tensorflow/core/framework/tensor_shape.pb.h" // #include "tensorflow/core/framework/types.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2ftensor_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2ftensor_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class TensorProto extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorProto(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorProto(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorProto position(long position) { return (TensorProto)super.position(position); } public TensorProto() { super((Pointer)null); allocate(); } private native void allocate(); public TensorProto(@Const @ByRef TensorProto from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorProto from); public native @ByRef @Name("operator =") TensorProto put(@Const @ByRef TensorProto from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorProto default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorProto internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorProto other); public native void Swap(TensorProto other); // implements Message ---------------------------------------------- public native TensorProto New(); public native TensorProto New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorProto from); public native void MergeFrom(@Const @ByRef TensorProto from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, 
@Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated float float_val = 5 [packed = true]; public native int float_val_size(); public native void clear_float_val(); @MemberGetter public static native int kFloatValFieldNumber(); public static final int kFloatValFieldNumber = kFloatValFieldNumber(); public native float float_val(int index); public native void set_float_val(int index, float value); public native void add_float_val(float value); // repeated double double_val = 6 [packed = true]; public native int double_val_size(); public native void clear_double_val(); @MemberGetter public static native int kDoubleValFieldNumber(); public static final int kDoubleValFieldNumber = kDoubleValFieldNumber(); public native double double_val(int index); public native void set_double_val(int index, double value); public native void add_double_val(double value); // repeated int32 int_val = 7 [packed = true]; public native int int_val_size(); public native void clear_int_val(); @MemberGetter public static native int kIntValFieldNumber(); public static final int kIntValFieldNumber = kIntValFieldNumber(); public native @Cast("google::protobuf::int32") int int_val(int index); public native void set_int_val(int index, @Cast("google::protobuf::int32") int value); public native void add_int_val(@Cast("google::protobuf::int32") int value); // repeated bytes string_val = 8; public native int string_val_size(); public native void clear_string_val(); @MemberGetter public static native int kStringValFieldNumber(); public static final int kStringValFieldNumber = kStringValFieldNumber(); public native @StdString BytePointer string_val(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_string_val(int index); public native void set_string_val(int index, @StdString BytePointer value); public native void set_string_val(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_string_val(int index, @Const Pointer value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_string_val(); public native void add_string_val(@StdString BytePointer value); public native void add_string_val(@StdString String value); // #if LANG_CXX11 // #endif public native void add_string_val(@Const Pointer value, @Cast("size_t") long size); // repeated float scomplex_val = 9 [packed = true]; public native int scomplex_val_size(); public native void clear_scomplex_val(); @MemberGetter public static native int kScomplexValFieldNumber(); public static final int kScomplexValFieldNumber = kScomplexValFieldNumber(); public native float scomplex_val(int index); public native void set_scomplex_val(int index, float value); public native void add_scomplex_val(float value); // repeated int64 int64_val = 10 [packed = true]; public native int int64_val_size(); public native void clear_int64_val(); @MemberGetter public static native int kInt64ValFieldNumber(); public static final int kInt64ValFieldNumber = kInt64ValFieldNumber(); public native @Cast("google::protobuf::int64") long int64_val(int index); public native void set_int64_val(int index, @Cast("google::protobuf::int64") long value); public native void add_int64_val(@Cast("google::protobuf::int64") long value); // repeated 
bool bool_val = 11 [packed = true]; public native int bool_val_size(); public native void clear_bool_val(); @MemberGetter public static native int kBoolValFieldNumber(); public static final int kBoolValFieldNumber = kBoolValFieldNumber(); public native @Cast("bool") boolean bool_val(int index); public native void set_bool_val(int index, @Cast("bool") boolean value); public native void add_bool_val(@Cast("bool") boolean value); // repeated double dcomplex_val = 12 [packed = true]; public native int dcomplex_val_size(); public native void clear_dcomplex_val(); @MemberGetter public static native int kDcomplexValFieldNumber(); public static final int kDcomplexValFieldNumber = kDcomplexValFieldNumber(); public native double dcomplex_val(int index); public native void set_dcomplex_val(int index, double value); public native void add_dcomplex_val(double value); // repeated int32 half_val = 13 [packed = true]; public native int half_val_size(); public native void clear_half_val(); @MemberGetter public static native int kHalfValFieldNumber(); public static final int kHalfValFieldNumber = kHalfValFieldNumber(); public native @Cast("google::protobuf::int32") int half_val(int index); public native void set_half_val(int index, @Cast("google::protobuf::int32") int value); public native void add_half_val(@Cast("google::protobuf::int32") int value); // repeated .tensorflow.ResourceHandleProto resource_handle_val = 14; public native int resource_handle_val_size(); public native void clear_resource_handle_val(); @MemberGetter public static native int kResourceHandleValFieldNumber(); public static final int kResourceHandleValFieldNumber = kResourceHandleValFieldNumber(); public native ResourceHandleProto mutable_resource_handle_val(int index); public native @Const @ByRef ResourceHandleProto resource_handle_val(int index); public native ResourceHandleProto add_resource_handle_val(); // repeated .tensorflow.VariantTensorDataProto variant_val = 15; public native int variant_val_size(); public native void clear_variant_val(); @MemberGetter public static native int kVariantValFieldNumber(); public static final int kVariantValFieldNumber = kVariantValFieldNumber(); public native VariantTensorDataProto mutable_variant_val(int index); public native @Const @ByRef VariantTensorDataProto variant_val(int index); public native VariantTensorDataProto add_variant_val(); // repeated uint32 uint32_val = 16 [packed = true]; public native int uint32_val_size(); public native void clear_uint32_val(); @MemberGetter public static native int kUint32ValFieldNumber(); public static final int kUint32ValFieldNumber = kUint32ValFieldNumber(); public native @Cast("google::protobuf::uint32") int uint32_val(int index); public native void set_uint32_val(int index, @Cast("google::protobuf::uint32") int value); public native void add_uint32_val(@Cast("google::protobuf::uint32") int value); // repeated uint64 uint64_val = 17 [packed = true]; public native int uint64_val_size(); public native void clear_uint64_val(); @MemberGetter public static native int kUint64ValFieldNumber(); public static final int kUint64ValFieldNumber = kUint64ValFieldNumber(); public native @Cast("google::protobuf::uint64") long uint64_val(int index); public native void set_uint64_val(int index, @Cast("google::protobuf::uint64") long value); public native void add_uint64_val(@Cast("google::protobuf::uint64") long value); // bytes tensor_content = 4; public native void clear_tensor_content(); @MemberGetter public static native int kTensorContentFieldNumber(); public 
static final int kTensorContentFieldNumber = kTensorContentFieldNumber(); public native @StdString BytePointer tensor_content(); public native void set_tensor_content(@StdString BytePointer value); public native void set_tensor_content(@StdString String value); // #if LANG_CXX11 // #endif public native void set_tensor_content(@Const Pointer value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_tensor_content(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_tensor_content(); public native void set_allocated_tensor_content(@StdString @Cast({"char*", "std::string*"}) BytePointer tensor_content); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_tensor_content(); public native @Deprecated void unsafe_arena_set_allocated_tensor_content( @StdString @Cast({"char*", "std::string*"}) BytePointer tensor_content); // .tensorflow.TensorShapeProto tensor_shape = 2; public native @Cast("bool") boolean has_tensor_shape(); public native void clear_tensor_shape(); @MemberGetter public static native int kTensorShapeFieldNumber(); public static final int kTensorShapeFieldNumber = kTensorShapeFieldNumber(); public native @Const @ByRef TensorShapeProto tensor_shape(); public native TensorShapeProto release_tensor_shape(); public native TensorShapeProto mutable_tensor_shape(); public native void set_allocated_tensor_shape(TensorShapeProto tensor_shape); public native void unsafe_arena_set_allocated_tensor_shape( TensorShapeProto tensor_shape); public native TensorShapeProto unsafe_arena_release_tensor_shape(); // .tensorflow.DataType dtype = 1; public native void clear_dtype(); @MemberGetter public static native int kDtypeFieldNumber(); public static final int kDtypeFieldNumber = kDtypeFieldNumber(); public native @Cast("tensorflow::DataType") int dtype(); public native void set_dtype(@Cast("tensorflow::DataType") int value); // int32 version_number = 3; public native void clear_version_number(); @MemberGetter public static native int kVersionNumberFieldNumber(); public static final int kVersionNumberFieldNumber = kVersionNumberFieldNumber(); public native @Cast("google::protobuf::int32") int version_number(); public native void set_version_number(@Cast("google::protobuf::int32") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class VariantTensorDataProto extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VariantTensorDataProto(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public VariantTensorDataProto(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public VariantTensorDataProto position(long position) { return (VariantTensorDataProto)super.position(position); } public VariantTensorDataProto() { super((Pointer)null); allocate(); } private native void allocate(); public VariantTensorDataProto(@Const @ByRef VariantTensorDataProto from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef VariantTensorDataProto from); public native @ByRef @Name("operator =") VariantTensorDataProto put(@Const @ByRef VariantTensorDataProto from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef VariantTensorDataProto default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const VariantTensorDataProto internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(VariantTensorDataProto other); public native void Swap(VariantTensorDataProto other); // implements Message ---------------------------------------------- public native VariantTensorDataProto New(); public native VariantTensorDataProto New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef VariantTensorDataProto from); public native void MergeFrom(@Const @ByRef VariantTensorDataProto from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.TensorProto tensors = 3; public native int tensors_size(); public native void clear_tensors(); @MemberGetter public static native int kTensorsFieldNumber(); public static final int kTensorsFieldNumber = kTensorsFieldNumber(); public native TensorProto mutable_tensors(int index); public native @Const @ByRef TensorProto tensors(int index); public native TensorProto add_tensors(); // string type_name = 1; public native void clear_type_name(); @MemberGetter public static native int kTypeNameFieldNumber(); public static final int kTypeNameFieldNumber = 
kTypeNameFieldNumber(); public native @StdString BytePointer type_name(); public native void set_type_name(@StdString BytePointer value); public native void set_type_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_type_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_type_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type_name(); public native void set_allocated_type_name(@StdString @Cast({"char*", "std::string*"}) BytePointer type_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_type_name(); public native @Deprecated void unsafe_arena_set_allocated_type_name( @StdString @Cast({"char*", "std::string*"}) BytePointer type_name); // bytes metadata = 2; public native void clear_metadata(); @MemberGetter public static native int kMetadataFieldNumber(); public static final int kMetadataFieldNumber = kMetadataFieldNumber(); public native @StdString BytePointer metadata(); public native void set_metadata(@StdString BytePointer value); public native void set_metadata(@StdString String value); // #if LANG_CXX11 // #endif public native void set_metadata(@Const Pointer value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_metadata(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_metadata(); public native void set_allocated_metadata(@StdString @Cast({"char*", "std::string*"}) BytePointer metadata); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_metadata(); public native @Deprecated void unsafe_arena_set_allocated_metadata( @StdString @Cast({"char*", "std::string*"}) BytePointer metadata); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // TensorProto // .tensorflow.DataType dtype = 1; // .tensorflow.TensorShapeProto tensor_shape = 2; // int32 version_number = 3; // bytes tensor_content = 4; // #if LANG_CXX11 // #endif // repeated int32 half_val = 13 [packed = true]; // repeated float float_val = 5 [packed = true]; // repeated double double_val = 6 [packed = true]; // repeated int32 int_val = 7 [packed = true]; // repeated bytes string_val = 8; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // repeated float scomplex_val = 9 [packed = true]; // repeated int64 int64_val = 10 [packed = true]; // repeated bool bool_val = 11 [packed = true]; // repeated double dcomplex_val = 12 [packed = true]; // repeated .tensorflow.ResourceHandleProto resource_handle_val = 14; // repeated .tensorflow.VariantTensorDataProto variant_val = 15; // repeated uint32 uint32_val = 16 [packed = true]; // repeated uint64 uint64_val = 17 [packed = true]; // ------------------------------------------------------------------- // VariantTensorDataProto // string type_name = 1; // #if LANG_CXX11 // #endif // bytes metadata = 2; // #if LANG_CXX11 // #endif // repeated .tensorflow.TensorProto tensors = 3; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // 
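// Usage sketch (illustrative, not generated by JavaCPP): filling a TensorProto
// and wrapping it in a VariantTensorDataProto through the accessors above.
// Assumes the DT_FLOAT constant defined elsewhere in these bindings.
public static VariantTensorDataProto exampleVariantTensorDataProto() {
    TensorProto tensor = new TensorProto();
    tensor.set_dtype(DT_FLOAT);              // .tensorflow.DataType dtype = 1
    for (int i = 0; i < 6; i++) {
        tensor.add_float_val(i);             // repeated float float_val = 5 [packed = true]
    }
    VariantTensorDataProto variant = new VariantTensorDataProto();
    variant.set_type_name("ExampleVariant"); // string type_name = 1
    variant.add_tensors().CopyFrom(tensor);  // repeated .tensorflow.TensorProto tensors = 3
    return variant;
}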
@@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_2eproto // Parsed from tensorflow/core/framework/tensor_description.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/tensor_description.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include "tensorflow/core/framework/types.pb.h" // #include "tensorflow/core/framework/tensor_shape.pb.h" // #include "tensorflow/core/framework/allocation_description.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class TensorDescription extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorDescription(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public TensorDescription(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorDescription position(long position) { return (TensorDescription)super.position(position); } public TensorDescription() { super((Pointer)null); allocate(); } private native void allocate(); public TensorDescription(@Const @ByRef TensorDescription from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorDescription from); public native @ByRef @Name("operator =") TensorDescription put(@Const @ByRef TensorDescription from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorDescription default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorDescription internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorDescription other); public native void Swap(TensorDescription other); // implements Message ---------------------------------------------- public native TensorDescription New(); public native TensorDescription New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorDescription from); public native void MergeFrom(@Const @ByRef TensorDescription from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.TensorShapeProto shape = 2; public native @Cast("bool") boolean has_shape(); public native void clear_shape(); @MemberGetter public static native int kShapeFieldNumber(); public static final int kShapeFieldNumber = kShapeFieldNumber(); public native @Const @ByRef TensorShapeProto shape(); public native TensorShapeProto release_shape(); public native TensorShapeProto mutable_shape(); public native void set_allocated_shape(TensorShapeProto shape); public native void unsafe_arena_set_allocated_shape( TensorShapeProto shape); public native TensorShapeProto unsafe_arena_release_shape(); // .tensorflow.AllocationDescription allocation_description = 4; 
public native @Cast("bool") boolean has_allocation_description(); public native void clear_allocation_description(); @MemberGetter public static native int kAllocationDescriptionFieldNumber(); public static final int kAllocationDescriptionFieldNumber = kAllocationDescriptionFieldNumber(); public native @Const @ByRef AllocationDescription allocation_description(); public native AllocationDescription release_allocation_description(); public native AllocationDescription mutable_allocation_description(); public native void set_allocated_allocation_description(AllocationDescription allocation_description); public native void unsafe_arena_set_allocated_allocation_description( AllocationDescription allocation_description); public native AllocationDescription unsafe_arena_release_allocation_description(); // .tensorflow.DataType dtype = 1; public native void clear_dtype(); @MemberGetter public static native int kDtypeFieldNumber(); public static final int kDtypeFieldNumber = kDtypeFieldNumber(); public native @Cast("tensorflow::DataType") int dtype(); public native void set_dtype(@Cast("tensorflow::DataType") int value); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // TensorDescription // .tensorflow.DataType dtype = 1; // .tensorflow.TensorShapeProto shape = 2; // .tensorflow.AllocationDescription allocation_description = 4; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto // Parsed from tensorflow/core/framework/tensor_types.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_TYPES_H_ // #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_TYPES_H_ // #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" // Helper to define Tensor types given that the scalar is of type T. // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_TYPES_H_ // Parsed from tensorflow/core/framework/tensor_shape.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_SHAPE_H_
// #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_SHAPE_H_

// #include

// #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/lib/strings/str_util.h"
// #include "tensorflow/core/platform/logging.h"

// START_SKIP_DOXYGEN
// END_SKIP_DOXYGEN

/** Internal representation for both TensorShape and PartialTensorShape. */
@Namespace("tensorflow") @NoOffset public static class TensorShapeRep extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorShapeRep(Pointer p) { super(p); }

    /** Copy the specified shape */
    public TensorShapeRep(@Const @ByRef TensorShapeRep b) { super((Pointer)null); allocate(b); }
    private native void allocate(@Const @ByRef TensorShapeRep b);

    public native @Name("operator =") void put(@Const @ByRef TensorShapeRep b);

    /** Move the specified shape. After moving, {@code b} is safe for destruction and */
    // can be reassigned into, but its dimensions and number of elements can be
    // nonsensical (e.g., negative dimension sizes, or number of elements not
    // properly recomputed).

    /** Clear a tensor shape, producing the scalar shape. */
    public native void Clear();

    // Maximum number of dimensions in a tensor.
    // It's 254 because 255 = kUnknownRank is used to represent unknown rank.
    ///
    public static native int MaxDimensions();

    /** \brief Returns the number of elements in the tensor.
     *
     *  We use {@code int64} and not {@code size_t} to be compatible with {@code Eigen::Tensor}
     *  which uses {@code ptrdiff_t}. For PartialTensorShape, -1 means not fully
     *  defined. */
    public native @Cast("tensorflow::int64") long num_elements();

    /** For error messages. */
    public native @StdString BytePointer DebugString();
    public static native @StdString BytePointer DebugString(@Const @ByRef TensorShapeProto proto);

    public native void DumpRep();
}

/** Base class for TensorShape and PartialTensorShape.
 *  The class is templatized by either TensorShape or PartialTensorShape to
 *  allow skipping known/unknown checks in the TensorShape case, but the
 *  representation is shared exactly for fast conversion. */
@Name("tensorflow::TensorShapeBase") @NoOffset public static class TensorShapeBase extends TensorShapeRep {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorShapeBase(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TensorShapeBase(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public TensorShapeBase position(long position) { return (TensorShapeBase)super.position(position); }

    /** \brief Construct a {@code TensorShapeBase} from the provided sizes.
* REQUIRES: {@code dim_sizes[i] >= 0} (or >= -1 for PartialTensorShape) */ public TensorShapeBase(@Cast("tensorflow::int64*") @ArraySlice LongPointer dim_sizes) { super((Pointer)null); allocate(dim_sizes); } private native void allocate(@Cast("tensorflow::int64*") @ArraySlice LongPointer dim_sizes); public TensorShapeBase(@Cast("tensorflow::int64*") @ArraySlice LongBuffer dim_sizes) { super((Pointer)null); allocate(dim_sizes); } private native void allocate(@Cast("tensorflow::int64*") @ArraySlice LongBuffer dim_sizes); public TensorShapeBase(@Cast("tensorflow::int64*") @ArraySlice long... dim_sizes) { super((Pointer)null); allocate(dim_sizes); } private native void allocate(@Cast("tensorflow::int64*") @ArraySlice long... dim_sizes); /** Construct an empty TensorShape, or an unknown rank PartialTensorShape */ public TensorShapeBase() { super((Pointer)null); allocate(); } private native void allocate(); public TensorShapeBase(@Const @ByRef TensorShapeProto proto) { super((Pointer)null); allocate(proto); } private native void allocate(@Const @ByRef TensorShapeProto proto); /** Returns {@code true} iff {@code proto} is a valid tensor shape. */ // For TensorShape, the proto shape must be fully defined. public static native @Cast("bool") boolean IsValid(@Const @ByRef TensorShapeProto proto); /** Returns {@code OK} iff {@code proto} is a valid tensor shape, and a descriptive error * status otherwise. */ public static native @ByVal Status IsValidShape(@Const @ByRef TensorShapeProto proto); /** \brief Add a dimension to the end ("inner-most"). * REQUIRES: {@code size >= 0} */ public native void AddDim(@Cast("tensorflow::int64") long size); /** Appends all the dimensions from {@code shape}. */ public native void AppendShape(@Const @ByRef TensorShapeBase shape); /** \brief Insert a dimension somewhere in the {@code TensorShape}. * REQUIRES: {@code 0 <= d <= dims()} * REQUIRES: {@code size >= 0} */ public native void InsertDim(int d, @Cast("tensorflow::int64") long size); /** \brief Modifies the size of the dimension {@code d} to be {@code size} * REQUIRES: {@code 0 <= d < dims()} * REQUIRES: {@code size >= 0} */ public native void set_dim(int d, @Cast("tensorflow::int64") long size); /** \brief Removes dimension {@code d} from the {@code TensorShape}. * REQUIRES: {@code 0 <= d < dims()} */ public native void RemoveDim(int d); /** \brief Removes last {@code n} dimensions from the {@code TensorShape}. * REQUIRES: {@code 0 <= n <= dims()} */ public native void RemoveLastDims(int n); /** \brief Removes the dimensions in range {@code [begin:end)} from {@code TensorShape}. * Negative values of {@code end} are interpreted as {@code dims() + end + 1} (as in * Python). The same is true for negative values of {@code begin}. REQUIRES: * {@code -(dims()+1) <= begin <= dims()} REQUIRES: {@code -(dims()+1) <= end <= dims()} */ public native void RemoveDimRange(int begin, int end); /** Return whether the rank is unknown */ public native @Cast("bool") boolean unknown_rank(); /** Return the number of dimensions in the tensor. * Can be -1 meaning unknown rank for PartialTensorShape. */ public native int dims(); /** \brief Returns the number of elements in dimension {@code d}. * REQUIRES: {@code 0 <= d < dims()} */ // TODO(touts): Rename to `dimension()` to match // `Eigen::Tensor::dimension()`? public native @Cast("tensorflow::int64") long dim_size(int d); /** Returns sizes of all dimensions. */ // Returns an empty list for unknown rank PartialTensorShape. 
public native @ByVal LongVector dim_sizes(); /** Return true iff the rank and all of the dimensions are well defined */ // TODO(irving): Rename to is_fully_defined now that it's fast. public native @Cast("bool") boolean IsFullyDefined(); /** Fill {@code *proto} from {@code *this}. */ public native void AsProto(TensorShapeProto proto); /** For iterating through the dimensions. */ public native @ByVal TensorShapeIter begin(); public native @ByVal TensorShapeIter end(); } /** Outputs {@code TensorShapeBase} to {@code std::ostream}. */ /** Represents the shape of a Tensor. * * A tensor's shape is denoted by its number of dimensions and a size for each * dimension. For example, a Tensor represented by a 3 x 4 matrix would have * a shape of 2-D, [3,4]. * * If you know the exact shape of your Tensor when you create the TensorShape * object, you can specify it then, or you can create a TensorShape with * zero dimensions and one element, and call AddDim() to add dimensions later. */ @Namespace("tensorflow") public static class TensorShape extends TensorShapeBase { static { Loader.load(); } public TensorShape(@Cast("tensorflow::int64*") @ArraySlice LongPointer dim_sizes) { super((Pointer)null); allocate(dim_sizes); } private native void allocate(@Cast("tensorflow::int64*") @ArraySlice LongPointer dim_sizes); public TensorShape(@Cast("tensorflow::int64*") @ArraySlice LongBuffer dim_sizes) { super((Pointer)null); allocate(dim_sizes); } private native void allocate(@Cast("tensorflow::int64*") @ArraySlice LongBuffer dim_sizes); public TensorShape(@Cast("tensorflow::int64*") @ArraySlice long... dim_sizes) { super((Pointer)null); allocate(dim_sizes); } private native void allocate(@Cast("tensorflow::int64*") @ArraySlice long... dim_sizes); public TensorShape() { super((Pointer)null); allocate(); } private native void allocate(); public TensorShape(@Const @ByRef TensorShapeProto proto) { super((Pointer)null); allocate(proto); } private native void allocate(@Const @ByRef TensorShapeProto proto); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorShape(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorShape(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorShape position(long position) { return (TensorShape)super.position(position); } /** Allow a TensorShape to be used as a PartialTensorShape without copying */ public native @Const @ByRef @Name("operator const tensorflow::PartialTensorShape&") PartialTensorShape asPartialTensorShape(); // NOLINT(runtime/explicit) /** Returns true if {@code *this} and {@code b} have the same sizes. Ignores * dimension names. */ public native @Cast("bool") boolean IsSameSize(@Const @ByRef TensorShape b); public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorShape b); public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorShape b); /** Fill {@code *dsizes} from {@code *this}. */ /** Same as {@code AsEigenDSizes()} but allows for {@code NDIMS > dims()} -- in * which case we pad the rest of the sizes with 1. */ // These CHECK fail to ease debugging. // REQUIRES: dims() == NDIMS } /** Represents the value of one dimension in a TensorShape. */ @Namespace("tensorflow") @NoOffset public static class TensorShapeDim extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TensorShapeDim(Pointer p) { super(p); } public TensorShapeDim(@Cast("tensorflow::int64") long s) { super((Pointer)null); allocate(s); } private native void allocate(@Cast("tensorflow::int64") long s); public native @Cast("tensorflow::int64") long size(); public native TensorShapeDim size(long size); } // START_SKIP_DOXYGEN @Name("tensorflow::TensorShapeIter") @NoOffset public static class TensorShapeIter extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorShapeIter(Pointer p) { super(p); } public TensorShapeIter(@Const TensorShape shape, int d) { super((Pointer)null); allocate(shape, d); } private native void allocate(@Const TensorShape shape, int d); public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorShapeIter rhs); public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorShapeIter rhs); public native @Name("operator ++") void increment(); public native @ByVal @Name("operator *") TensorShapeDim multiply(); } // END_SKIP_DOXYGEN /** \brief Static helper routines for {@code TensorShape}. Includes a few common * predicates on a tensor shape. */ @Namespace("tensorflow") public static class TensorShapeUtils extends Pointer { static { Loader.load(); } /** Default native constructor. */ public TensorShapeUtils() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorShapeUtils(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorShapeUtils(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public TensorShapeUtils position(long position) { return (TensorShapeUtils)super.position(position); } public static native @Cast("bool") boolean IsScalar(@Const @ByRef TensorShape shape); public static native @Cast("bool") boolean IsVector(@Const @ByRef TensorShape shape); public static native @Cast("bool") boolean IsVectorOrHigher(@Const @ByRef TensorShape shape); public static native @Cast("bool") boolean IsMatrix(@Const @ByRef TensorShape shape); public static native @Cast("bool") boolean IsSquareMatrix(@Const @ByRef TensorShape shape); public static native @Cast("bool") boolean IsMatrixOrHigher(@Const @ByRef TensorShape shape); /** \brief Returns a {@code TensorShape} whose dimensions are * {@code dims[0]}, {@code dims[1]}, ..., {@code dims[n-1]}. 
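 *
 *  Illustrative sketch (not generated by JavaCPP; assumes the Status
 *  bindings declared earlier in this file):
 *
 *  <pre>{@code java
 *      TensorShape out = new TensorShape();
 *      Status s = TensorShapeUtils.MakeShape(new long[] {2, 3}, out);
 *      // on success, out.dims() == 2 and out.num_elements() == 6
 *  }</pre>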
*/ public static native @ByVal Status MakeShape(@Const IntPointer dims, @Cast("tensorflow::int64") long n, TensorShape out); public static native @ByVal Status MakeShape(@Const IntBuffer dims, @Cast("tensorflow::int64") long n, TensorShape out); public static native @ByVal Status MakeShape(@Const int[] dims, @Cast("tensorflow::int64") long n, TensorShape out); public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") LongPointer dims, @Cast("tensorflow::int64") long n, TensorShape out); public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") LongBuffer dims, @Cast("tensorflow::int64") long n, TensorShape out); public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") long[] dims, @Cast("tensorflow::int64") long n, TensorShape out); public static native @ByVal Status MakeShape(@ArraySlice IntPointer shape, TensorShape out); public static native @ByVal Status MakeShape(@ArraySlice IntBuffer shape, TensorShape out); public static native @ByVal Status MakeShape(@ArraySlice int[] shape, TensorShape out); public static native @ByVal Status MakeShape(@Cast("tensorflow::int64*") @ArraySlice LongPointer shape, TensorShape out); public static native @ByVal Status MakeShape(@Cast("tensorflow::int64*") @ArraySlice LongBuffer shape, TensorShape out); public static native @ByVal Status MakeShape(@Cast("tensorflow::int64*") @ArraySlice long[] shape, TensorShape out); public static native @ByVal Status MakeShape(@Const IntPointer dims, @Cast("tensorflow::int64") long n, PartialTensorShape out); public static native @ByVal Status MakeShape(@Const IntBuffer dims, @Cast("tensorflow::int64") long n, PartialTensorShape out); public static native @ByVal Status MakeShape(@Const int[] dims, @Cast("tensorflow::int64") long n, PartialTensorShape out); public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") LongPointer dims, @Cast("tensorflow::int64") long n, PartialTensorShape out); public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") LongBuffer dims, @Cast("tensorflow::int64") long n, PartialTensorShape out); public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") long[] dims, @Cast("tensorflow::int64") long n, PartialTensorShape out); public static native @ByVal Status MakeShape(@ArraySlice IntPointer shape, PartialTensorShape out); public static native @ByVal Status MakeShape(@ArraySlice IntBuffer shape, PartialTensorShape out); public static native @ByVal Status MakeShape(@ArraySlice int[] shape, PartialTensorShape out); public static native @ByVal Status MakeShape(@Cast("tensorflow::int64*") @ArraySlice LongPointer shape, PartialTensorShape out); public static native @ByVal Status MakeShape(@Cast("tensorflow::int64*") @ArraySlice LongBuffer shape, PartialTensorShape out); public static native @ByVal Status MakeShape(@Cast("tensorflow::int64*") @ArraySlice long[] shape, PartialTensorShape out); public static native @StdString BytePointer ShapeListString(@Cast("const tensorflow::gtl::ArraySlice*") @ByRef TensorShapeVector shapes); /** \brief Returns true iff {@code shape} starts with {@code prefix}. */ public static native @Cast("bool") boolean StartsWith(@Const @ByRef TensorShape shape, @Const @ByRef TensorShape prefix); /** \brief Returns true iff {@code shape} ends with {@code suffix}. 
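 *
 *  Illustrative sketch (not generated by JavaCPP):
 *
 *  <pre>{@code java
 *      TensorShape shape  = new TensorShape(2, 3, 4);
 *      TensorShape suffix = new TensorShape(3, 4);
 *      boolean b = TensorShapeUtils.EndsWith(shape, suffix);  // true
 *  }</pre>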
*/
public static native @Cast("bool") boolean EndsWith(@Const @ByRef TensorShape shape, @Const @ByRef TensorShape suffix);

/** \brief Returns the product of values in an int64 array,
 *  or a failing Status if the array represents a value larger than
 *  a {@code TensorShape} can hold. */
public static native @ByVal Status NumElements(@Cast("tensorflow::int64*") @ArraySlice LongPointer shape, @Cast("tensorflow::int64*") LongPointer num_elements);
public static native @ByVal Status NumElements(@Cast("tensorflow::int64*") @ArraySlice LongBuffer shape, @Cast("tensorflow::int64*") LongBuffer num_elements);
public static native @ByVal Status NumElements(@Cast("tensorflow::int64*") @ArraySlice long[] shape, @Cast("tensorflow::int64*") long... num_elements);
}

/** Manages the partially known dimensions of a Tensor and their sizes. */
@Namespace("tensorflow") public static class PartialTensorShape extends TensorShapeBase {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public PartialTensorShape(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public PartialTensorShape(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public PartialTensorShape position(long position) { return (PartialTensorShape)super.position(position); }
    public PartialTensorShape() { super((Pointer)null); allocate(); }
    private native void allocate();

    /** Add a dimension to the end ("inner-most"), returns a new
     *  PartialTensorShape.
     *  REQUIRES: {@code size >= -1}, where -1 means unknown */
    public native @ByVal PartialTensorShape Concatenate(@Cast("tensorflow::int64") long size);

    /** Appends all the dimensions from {@code shape}. Returns a new
     *  PartialTensorShape. */
    public native @ByVal PartialTensorShape Concatenate(@Const @ByRef PartialTensorShape shape);

    /** Merges all the dimensions from {@code shape}. Returns
     *  {@code InvalidArgument} error if either {@code shape} has a different rank
     *  or if any of the dimensions are incompatible. */
    public native @ByVal Status MergeWith(@Const @ByRef PartialTensorShape shape, PartialTensorShape result);

    /** Exact equality test. Returns true iff the ranks match (i.e., both are
     *  unknown, or both are known and equal), and all dimensions are equal (i.e.,
     *  both dimensions are unknown, or both are known and equal). This is a
     *  stronger condition than IsCompatibleWith. */
    public native @Cast("bool") boolean IsIdenticalTo(@Const @ByRef PartialTensorShape shape);

    /** Return true iff the ranks match, and if the
     *  dimensions all either match or one is unknown. */
    public native @Cast("bool") boolean IsCompatibleWith(@Const @ByRef PartialTensorShape shape);

    // Fill `*shape` from `*this`.
    // If `*this` is not fully defined, returns false and
    // `*shape` is left in an intermediate state. Otherwise
    // returns true.
    public native @Cast("bool") boolean AsTensorShape(TensorShape shape);

    /** \brief Returns a {@code PartialTensorShape} whose dimensions are
     *  {@code dims[0]}, {@code dims[1]}, ..., {@code dims[n-1]}. Values of -1 are
     *  considered "unknown". */
}

/** \brief Static helper routines for {@code PartialTensorShape}. Includes a few
 *  common predicates on a partially known tensor shape. */
@Namespace("tensorflow") public static class PartialTensorShapeUtils extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public PartialTensorShapeUtils() { super((Pointer)null); allocate(); }
    /** Native array allocator.
Access with {@link Pointer#position(long)}. */ public PartialTensorShapeUtils(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PartialTensorShapeUtils(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public PartialTensorShapeUtils position(long position) { return (PartialTensorShapeUtils)super.position(position); } public static native @StdString BytePointer PartialShapeListString( @ArraySlice PartialTensorShape shapes); public static native @Cast("bool") boolean AreIdentical(@ArraySlice PartialTensorShape shapes0, @ArraySlice PartialTensorShape shapes1); public static native @Cast("bool") boolean AreCompatible(@ArraySlice PartialTensorShape shapes0, @ArraySlice PartialTensorShape shapes1); } // ---------------------------------------------------------------------------- // Template method implementation details below // ---------------------------------------------------------------------------- // ---------------------------------------------------------------------------- // Inlining of some performance critical routines // ---------------------------------------------------------------------------- // Declare explicit instantiations in .cc file // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_SHAPE_H_ // Parsed from tensorflow/core/framework/tensor_util.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_ // #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_ // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor.pb.h" // #include "tensorflow/core/framework/tensor_shape.pb.h" // #include // DeepCopy returns a tensor whose contents are a deep copy of the // contents of 'other'. This function is intended only for // convenience, not speed. // // REQUIRES: 'other' must point to data stored in CPU memory. // REQUIRES: 'other' must be a Tensor of a copy-able type if // 'other' is not appropriately memory-aligned. @Namespace("tensorflow::tensor") public static native @ByVal Tensor DeepCopy(@Const @ByRef Tensor other); // Concatenates 'tensors' into a single tensor, along their 0th dimension. // // REQUIRES: All members of 'tensors' must have the same data type parameter. // REQUIRES: Each member of 'tensors' must have at least one dimension. // REQUIRES: Each member of 'tensors' must point to data stored in CPU memory. // REQUIRES: Each member of 'tensors' must be a Tensor of a copy-able type if it // is not appropriately memory-aligned. @Namespace("tensorflow::tensor") public static native @ByVal Status Concat(@Const @ByRef TensorVector tensors, Tensor result); // Splits 'tensor' into 'sizes.size()' individual tensors, along the 0th // dimension. The ith output tensor has 0th-dimension size 'sizes[i]'. 
//
// REQUIRES: 'tensor' must have at least one dimension.
// REQUIRES: 'tensor.dim_size(0)' must equal the sum of the elements of 'sizes'.
// REQUIRES: 'tensor' must point to data stored in CPU memory.
// REQUIRES: 'tensor' must be a Tensor of a copy-able type if it is not
// appropriately memory-aligned.
//
// Split() and Concat() are inverse operations.
@Namespace("tensorflow::tensor") public static native @ByVal Status Split(@Const @ByRef Tensor tensor, @Cast("tensorflow::int64*") @ArraySlice LongPointer sizes, TensorVector result);
@Namespace("tensorflow::tensor") public static native @ByVal Status Split(@Const @ByRef Tensor tensor, @Cast("tensorflow::int64*") @ArraySlice LongBuffer sizes, TensorVector result);
@Namespace("tensorflow::tensor") public static native @ByVal Status Split(@Const @ByRef Tensor tensor, @Cast("tensorflow::int64*") @ArraySlice long[] sizes, TensorVector result);

@Namespace("tensorflow::tensor::internal") public static native void SetTensorProtoShape(@Cast("size_t*") @StdVector SizeTPointer shape, TensorShapeProto shape_proto);

// Defines value type dependent methods to manipulate `TensorProto`.
// Class specializations have to define the following methods:
//   static DataType GetDataType()
//   static void AddValue(Type value, TensorProto* proto)
// namespace internal

// Creates a 'TensorProto' with specified shape and values.
// The dtype and a field to represent data values of the returned 'TensorProto'
// are determined based on the type of the 'values' parameter.
// namespace tensor
// namespace tensorflow

// #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_


// Parsed from tensorflow/core/framework/tensor_reference.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_FRAMEWORK_TENSOR_REFERENCE_H_
// #define TENSORFLOW_FRAMEWORK_TENSOR_REFERENCE_H_

// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"

// An opaque class that holds a reference to an underlying TensorBuffer.
// Unlike Tensor, it does not have any shape or type information, so
// it is cheaper to construct/move, but the only thing you can really do
// with it is Unref it, which releases one of the references to the underlying
// TensorBuffer.
// IMPORTANT: If you do not call Unref(), you will likely leak tensor memory.
@Namespace("tensorflow") @NoOffset public static class TensorReference extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorReference(Pointer p) { super(p); }

    // Take the reference of the root buffer so the size will be more accurate
    public TensorReference(@Const @ByRef Tensor tensor) { super((Pointer)null); allocate(tensor); }
    private native void allocate(@Const @ByRef Tensor tensor);

    public native void Unref();

    // Return an estimate of the total bytes being kept alive by this reference.
public native @Cast("size_t") long TotalBytes(); public native void FillDescription(AllocationDescription description); // Convenience function for de-duplicating tensor references. public native @Cast("bool") boolean SharesBufferWith(@Const @ByRef TensorReference t); // Convenience function for de-duplicating tensor references. public native @Cast("bool") boolean SharesBufferWith(@Const @ByRef Tensor t); // Convenience function for de-duplicating tensor references. public native @Cast("size_t") long BufferHash(); // A constructor used only for tests public TensorReference(TensorBuffer test_buffer) { super((Pointer)null); allocate(test_buffer); } private native void allocate(TensorBuffer test_buffer); } // namespace tensorflow // #endif // TENSORFLOW_FRAMEWORK_TENSOR_REFERENCE_H_ // Parsed from tensorflow/core/framework/tensor.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_ // #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_ // #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" // #include "tensorflow/core/framework/allocator.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/tensor_types.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/framework/types.pb.h" // #include "tensorflow/core/lib/core/refcount.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/lib/gtl/inlined_vector.h" // #include "tensorflow/core/platform/logging.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/types.h" // Forward declarations. In particular, we forward declare protos so that their // symbols can be removed from .so exports. @Namespace("tensorflow::batch_util") public static native @ByVal Status CopyElementToSlice(@ByVal Tensor element, Tensor parent, @Cast("tensorflow::int64") long index); @Namespace("tensorflow::batch_util") public static native @ByVal Status MaybeMoveSliceToElement(Tensor parent, Tensor element, @Cast("tensorflow::int64") long index); // namespace batch_util /** \ingroup core * Represents an n-dimensional array of values. */ @Namespace("tensorflow") @NoOffset public static class Tensor extends AbstractTensor { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Tensor(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Tensor(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Tensor position(long position) { return (Tensor)super.position(position); } /** \brief Creates a 1-dimensional, 0-element float tensor. * * The returned Tensor is not a scalar (shape {}), but is instead * an empty one-dimensional Tensor (shape {0}, NumElements() == * 0). 
Since it has no elements, it does not need to be assigned a * value and is initialized by default (IsInitialized() is * true). If this is undesirable, consider creating a one-element * scalar which does require initialization: * *

{@code c++
   * 
   *      Tensor(DT_FLOAT, TensorShape({}))
   * 
   *  }
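   * 
   *  A rough Java equivalent through these bindings, for orientation only
   *  (it assumes the mapped default {@code TensorShape} constructor, which
   *  denotes a scalar shape, and the {@code DT_FLOAT} constant defined by
   *  this class; the variable names are illustrative):
   * 
   *  {@code java
   * 
   *      Tensor empty  = new Tensor();                            // shape {0}, 0 elements, initialized
   *      Tensor scalar = new Tensor(DT_FLOAT, new TensorShape()); // shape {}, 1 element, must be assigned
   * 
   *  }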
*/ /// public Tensor() { super((Pointer)null); allocate(); } private native void allocate(); /** \brief Creates a Tensor of the given {@code type} and {@code shape}. If * LogMemory::IsEnabled() the allocation is logged as coming from * an unknown kernel and step. Calling the Tensor constructor * directly from within an Op is deprecated: use the * OpKernelConstruction/OpKernelContext allocate_* methods to * allocate a new tensor, which record the kernel and step. * * The underlying buffer is allocated using a {@code CPUAllocator}. */ /// public Tensor(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape) { super((Pointer)null); allocate(type, shape); } private native void allocate(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape); /** \brief Creates a tensor with the input {@code type} and {@code shape}, using * the allocator {@code a} to allocate the underlying buffer. If * LogMemory::IsEnabled() the allocation is logged as coming from * an unknown kernel and step. Calling the Tensor constructor * directly from within an Op is deprecated: use the * OpKernelConstruction/OpKernelContext allocate_* methods to * allocate a new tensor, which record the kernel and step. * * {@code a} must outlive the lifetime of this Tensor. */ /// public Tensor(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape) { super((Pointer)null); allocate(a, type, shape); } private native void allocate(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape); /** \brief Creates a tensor with the input {@code type} and {@code shape}, using * the allocator {@code a} and the specified "allocation_attr" to * allocate the underlying buffer. If the kernel and step are known * allocation_attr.allocation_will_be_logged should be set to true * and LogMemory::RecordTensorAllocation should be called after the * tensor is constructed. Calling the Tensor constructor directly * from within an Op is deprecated: use the * OpKernelConstruction/OpKernelContext allocate_* methods to * allocate a new tensor, which record the kernel and step. * * {@code a} must outlive the lifetime of this Tensor. */ /// public Tensor(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape, @Const @ByRef AllocationAttributes allocation_attr) { super((Pointer)null); allocate(a, type, shape, allocation_attr); } private native void allocate(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape, @Const @ByRef AllocationAttributes allocation_attr); /** \brief Creates an empty Tensor of the given data type. * * Like Tensor(), returns a 1-dimensional, 0-element Tensor with * IsInitialized() returning True. See the Tensor() documentation * for details. */ public Tensor(@Cast("tensorflow::DataType") int type) { super((Pointer)null); allocate(type); } private native void allocate(@Cast("tensorflow::DataType") int type); /** Copy constructor. */ public Tensor(@Const @ByRef Tensor other) { super((Pointer)null); allocate(other); } private native void allocate(@Const @ByRef Tensor other); /** \brief Move constructor. After this call, {@code other} is safely destructible * and can be assigned to, but other calls on it (e.g. shape manipulation) * are not valid. */ // Creates a tensor with the input datatype, shape and buf. // // Acquires a ref on buf that belongs to this Tensor.
public Tensor(@Cast("tensorflow::DataType") int type, TensorShape shape, TensorBuffer buf) { super((Pointer)null); allocate(type, shape, buf); this.buffer = buf; } private native void allocate(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape, TensorBuffer buf); private TensorBuffer buffer; // a reference to prevent deallocation public Tensor(@Cast("tensorflow::DataType") int type, TensorShape shape, final Pointer data) { this(type, shape, new TensorBuffer() { @Override public Pointer data() { return data; } @Override public long size() { return data.limit(); } @Override public TensorBuffer root_buffer() { return this; } @Override public void FillAllocationDescription(AllocationDescription proto) { } }); } /** Returns the data type. */ public native @Cast("tensorflow::DataType") int dtype(); /** Returns the shape of the tensor. */ /// public native @Const @ByRef TensorShape shape(); /** \brief Convenience accessor for the tensor shape. * * For all shape accessors, see comments for relevant methods of * {@code TensorShape} in {@code tensor_shape.h}. */ public native int dims(); /** Convenience accessor for the tensor shape. */ public native @Cast("tensorflow::int64") long dim_size(int d); /** Convenience accessor for the tensor shape. */ public native @Cast("tensorflow::int64") long NumElements(); public native @Cast("bool") boolean IsSameSize(@Const @ByRef Tensor b); // True iff the two tensors use the same underlying refcounted storage /// public native @Cast("bool") boolean SharesBufferWith(@Const @ByRef Tensor b); /** \brief If necessary, has this Tensor been initialized? * * Zero-element Tensors are always considered initialized, even if they * have never been assigned to and do not have any memory allocated. */ public native @Cast("bool") boolean IsInitialized(); /** Returns the estimated memory usage of this tensor. */ public native @Cast("size_t") long TotalBytes(); // Returns the size of allocated memory for this tensor. public native @Cast("size_t") long AllocatedBytes(); /** Returns true iff this tensor is aligned. */ public native @Cast("bool") boolean IsAligned(); /** Assign operator. This tensor shares other's underlying storage. */ public native @ByRef @Name("operator =") Tensor put(@Const @ByRef Tensor other); /** Move operator. See move constructor for details. */ /** \brief Copy the other tensor into this tensor and reshape it. * * This tensor shares other's underlying storage. Returns {@code true} * iff {@code other.shape()} has the same number of elements of the given * {@code shape}. */ /// /// /// public native @Cast("bool") boolean CopyFrom(@Const @ByRef Tensor other, @Const @ByRef TensorShape shape); /** \brief Slice this tensor along the 1st dimension.

* I.e., the returned tensor satisfies * returned[i, ...] == this[dim0_start + i, ...]. * The returned tensor shares the underlying tensor buffer with this * tensor. * * NOTE: The returned tensor may not satisfy the same alignment * requirement as this tensor depending on the shape. The caller * must check the returned tensor's alignment before calling certain * methods that have alignment requirement (e.g., {@code flat()}, {@code tensor()}). * * NOTE: When fed with an N-dimensional tensor, this method returns a tensor * also with N dimensions. If you want to select a sub tensor, see SubSlice. * * REQUIRES: {@code dims()} >= 1 * REQUIRES: {@code 0 <= dim0_start <= dim0_limit <= dim_size(0)} */ /// /// /// public native @ByVal Tensor Slice(@Cast("tensorflow::int64") long dim0_start, @Cast("tensorflow::int64") long dim0_limit); /** \brief Select a subslice from this tensor along the 1st dimension. * * When fed with an N-dimensional tensor, this method returns a tensor with * N-1 dimensions, where the returned tensor is a subslice of the input * tensor along the first dimension. The N-1 dimensions of the returned * tensor are the last N-1 dimensions of the input tensor. * * NOTE: The returned tensor may not satisfy the same alignment * requirement as this tensor depending on the shape. The caller * must check the returned tensor's alignment before calling certain * methods that have alignment requirement (e.g., {@code flat()}, {@code tensor()}). * * REQUIRES: {@code dims()} >= 1 * REQUIRES: {@code 0 <= dim0_start < dim_size(0)} */ public native @ByVal Tensor SubSlice(@Cast("tensorflow::int64") long index); /** \brief Parse {@code other} and construct the tensor.

* Returns {@code true} iff the parsing succeeds. If the parsing fails, * the state of {@code *this} is unchanged. */ public native @Cast("bool") boolean FromProto(@Const @ByRef TensorProto other); /// public native @Cast("bool") boolean FromProto(Allocator a, @Const @ByRef TensorProto other); /** \brief Fills in {@code proto} with {@code *this} tensor's content. * * {@code AsProtoField()} fills in the repeated field for {@code proto.dtype()}, while * {@code AsProtoTensorContent()} encodes the content in {@code proto.tensor_content()} * in a compact form. */ public native void AsProtoField(TensorProto proto); /// /// /// /// /// public native void AsProtoTensorContent(TensorProto proto); /** \brief Return the tensor data as an {@code Eigen::Tensor} with the type and * sizes of this {@code Tensor}. * * Use these methods when you know the data type and the number of * dimensions of the Tensor and you want an {@code Eigen::Tensor} * automatically sized to the {@code Tensor} sizes. The implementation check * fails if either type or sizes mismatch. * * Example: * *

{@code c++
   * 
   *      typedef float T;
   *      Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
   *      auto mat = my_mat.matrix<T>();    // 2D Eigen::Tensor, 3 x 5.
   *      auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
   *      auto vec = my_mat.vec<T>();       // CHECK fails as my_mat is 2D.
   *      auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
   *      auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
   * 
   *  }
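   * 
   *  The typed accessors above ({@code matrix<T>()}, {@code tensor<T, NDIMS>()})
   *  exist only on the C++ side and are not mapped into Java. A common sketch
   *  instead views the data as a NIO buffer, assuming the {@code createBuffer()}
   *  helper this class inherits from {@code AbstractTensor} in the JavaCPP
   *  presets and the varargs {@code TensorShape} constructor:
   * 
   *  {@code java
   * 
   *      Tensor myMat = new Tensor(DT_FLOAT, new TensorShape(3, 5));
   *      FloatBuffer data = myMat.createBuffer(); // 15 floats, row-major
   *      data.put(1 * 5 + 2, 42.0f);              // writes element [1, 2]
   * 
   *  }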
*/ /** \brief Return the tensor data to an {@code Eigen::Tensor} with the * same size but a bitwise cast to the specified dtype {@code T}. * * Using a bitcast is useful for move and copy operations. * NOTE: this is the same as {@code tensor()} except a bitcast is allowed. */ /** \brief Return the tensor data to an {@code Eigen::Tensor} with the * last dimension elements converted into single elements of a larger type. * * For example, this is useful for kernels that can treat NCHW_VECT_C int8 * tensors as NCHW int32 tensors. The sizeof(T) should equal the size of * the original element type * num elements in the original last dimension. * NDIMS should be 1 less than the original number of dimensions. */ /** \brief Return the tensor data as an {@code Eigen::Tensor} of the data type and a * specified shape. * * These methods allow you to access the data with the dimensions * and sizes of your choice. You do not need to know the number of * dimensions of the Tensor to call them. However, they {@code CHECK} that * the type matches and the dimensions requested creates an * {@code Eigen::Tensor} with the same number of elements as the tensor. * * Example: * *
{@code c++
   * 
   *      typedef float T;
   *      Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
   *      // 1D Eigen::Tensor, size 60:
   *      auto flat = my_ten.flat<T>();
   *      // 2D Eigen::Tensor 12 x 5:
   *      auto inner = my_ten.flat_inner_dims<T>();
   *      // 2D Eigen::Tensor 4 x 15:
   *      auto outer = my_ten.shaped<T, 2>({4, 15});
   *      // CHECK fails, bad num elements:
   *      auto outer = my_ten.shaped<T, 2>({4, 8});
   *      // 3D Eigen::Tensor 6 x 5 x 2:
   *      auto weird = my_ten.shaped<T, 3>({6, 5, 2});
   *      // CHECK fails, type mismatch:
   *      auto bad   = my_ten.flat<int32>();
   * 
   *  }
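   * 
   *  These shaped views are likewise C++-only. From Java, the flat view can be
   *  approximated with the same (assumed) {@code createBuffer()} helper plus
   *  explicit row-major index arithmetic:
   * 
   *  {@code java
   * 
   *      Tensor myTen = new Tensor(DT_FLOAT, new TensorShape(4, 3, 5));
   *      FloatBuffer flat = myTen.createBuffer();  // 60 floats
   *      // element [p, r, c] sits at flat index (p * 3 + r) * 5 + c
   *      float v = flat.get((2 * 3 + 1) * 5 + 4);  // reads element [2, 1, 4]
   * 
   *  }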
*/ /** Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all * Tensor dimensions but the last NDIMS-1 into the first dimension of the * result. If NDIMS > dims() then leading dimensions of size 1 will be * added to make the output rank NDIMS. */ /** Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all * Tensor dimensions but the first NDIMS-1 into the last dimension of the * result. If NDIMS > dims() then trailing dimensions of size 1 will be * added to make the output rank NDIMS. */ /** Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing the * first 'begin' Tensor dimensions into the first dimension of the result and * the Tensor dimensions of the last dims() - 'begin' - NDIMS into the last * dimension of the result. If 'begin' < 0 then the |'begin'| leading * dimensions of size 1 will be added. If 'begin' + NDIMS > dims() then * 'begin' + NDIMS - dims() trailing dimensions of size 1 will be added. */ /** \brief Return the tensor data to an {@code Eigen::Tensor} with the new * shape specified in {@code new_sizes} and cast to a new dtype {@code T}. * * Using a bitcast is useful for move and copy operations. * The allowed bitcast is the only difference from {@code shaped()}. */ /** \brief Return the Tensor data as a {@code TensorMap} of fixed size 1: * {@code TensorMap<TensorFixedSize<T, 0>>}.

* Using {@code scalar()} allows the compiler to perform optimizations as * the size of the tensor is known at compile time. */ /** Const versions of all the methods above. */ /** \brief Return the tensor data to an {@code Eigen::Tensor} with the * same size but a bitwise cast to the specified dtype {@code T}. * * Using a bitcast is useful for move and copy operations. * NOTE: this is the same as {@code tensor()} except a bitcast is allowed. */ /** \brief Return the tensor data to an {@code Eigen::Tensor} with the * last dimension elements converted into single elements of a larger type. * * For example, this is useful for kernels that can treat NCHW_VECT_C int8 * tensors as NCHW int32 tensors. The sizeof(T) should equal the size of * the original element type * num elements in the original last dimension. * NDIMS should be 1 less than the original number of dimensions. */ /** \brief Return the tensor data to an {@code Eigen::Tensor} with the new * shape specified in {@code new_sizes} and cast to a new dtype {@code T}. * * Using a bitcast is useful for move and copy operations. * The allowed bitcast is the only difference from {@code shaped()}. */ /** Render the first {@code max_entries} values in {@code *this} into a string. */ public native @StdString BytePointer SummarizeValue(@Cast("tensorflow::int64") long max_entries, @Cast("bool") boolean print_v2/*=false*/); public native @StdString BytePointer SummarizeValue(@Cast("tensorflow::int64") long max_entries); /** A human-readable summary of the tensor suitable for debugging. */ public native @StdString BytePointer DebugString(); /** Fill in the {@code TensorDescription} proto with metadata about the * tensor that is useful for monitoring and debugging. */ /// /// /// public native void FillDescription(TensorDescription description); /** \brief Returns a {@code StringPiece} mapping the current tensor's buffer. * * The returned {@code StringPiece} may point to memory location on devices * that the CPU cannot address directly. * * NOTE: The underlying tensor buffer is refcounted, so the lifetime * of the contents mapped by the {@code StringPiece} matches the lifetime of * the buffer; callers should arrange to make sure the buffer does * not get destroyed while the {@code StringPiece} is still used. * * REQUIRES: {@code DataTypeCanUseMemcpy(dtype())}. */ /// public native @StringPiece BytePointer tensor_data(); /** Copy the other tensor into this tensor and reshape it and reinterpret the * buffer's datatype. * * This tensor shares other's underlying storage. */ public native void UnsafeCopyFromInternal(@Const @ByRef Tensor arg0, @Cast("tensorflow::DataType") int dtype, @Const @ByRef TensorShape arg2); } // Implementation details // START_SKIP_DOXYGEN // Interface to access the raw ref-counted data buffer. @Namespace("tensorflow") public static class TensorBuffer extends Pointer { static { Loader.load(); } /** Default native constructor. */ public TensorBuffer() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorBuffer(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorBuffer(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public TensorBuffer position(long position) { return (TensorBuffer)super.position(position); } // data() points to a memory region of size() bytes. 
@Virtual(true) public native @Const({false, false, true}) Pointer data(); @Virtual(true) public native @Cast("size_t") @Const({false, false, true}) long size(); // If this TensorBuffer is sub-buffer of another TensorBuffer, // returns that TensorBuffer. Otherwise, returns this. @Virtual(true) public native TensorBuffer root_buffer(); // Fill metadata about the allocation into the proto. @Virtual(true) public native @Const({false, false, true}) void FillAllocationDescription( AllocationDescription proto); // Whether this TensorBuffer owns the underlying memory. @Virtual public native @Cast("bool") @Const({false, false, true}) boolean OwnsMemory(); } // END_SKIP_DOXYGEN // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_ // Parsed from tensorflow/core/framework/attr_value.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/attr_value.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include // #include // #include "tensorflow/core/framework/tensor.pb.h" // #include "tensorflow/core/framework/tensor_shape.pb.h" // #include "tensorflow/core/framework/types.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto @Namespace("tensorflow") @Opaque public static class NameAttrList_AttrEntry_DoNotUse extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public NameAttrList_AttrEntry_DoNotUse() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NameAttrList_AttrEntry_DoNotUse(Pointer p) { super(p); } } // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class AttrValue_ListValue extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AttrValue_ListValue(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public AttrValue_ListValue(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AttrValue_ListValue position(long position) { return (AttrValue_ListValue)super.position(position); } public AttrValue_ListValue() { super((Pointer)null); allocate(); } private native void allocate(); public AttrValue_ListValue(@Const @ByRef AttrValue_ListValue from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef AttrValue_ListValue from); public native @ByRef @Name("operator =") AttrValue_ListValue put(@Const @ByRef AttrValue_ListValue from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef AttrValue_ListValue default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const AttrValue_ListValue internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(AttrValue_ListValue other); public native void Swap(AttrValue_ListValue other); // implements Message ---------------------------------------------- public native AttrValue_ListValue New(); public native AttrValue_ListValue New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef AttrValue_ListValue from); public native void MergeFrom(@Const @ByRef AttrValue_ListValue from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated bytes s = 2; public native int s_size(); public native void clear_s(); @MemberGetter public static native int kSFieldNumber(); public static final int kSFieldNumber = kSFieldNumber(); public native @StdString BytePointer s(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_s(int index); public native void set_s(int index, @StdString BytePointer value); public native void set_s(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_s(int index, @Const Pointer value, @Cast("size_t") long size); public native @StdString @Cast({"char*", 
"std::string*"}) BytePointer add_s(); public native void add_s(@StdString BytePointer value); public native void add_s(@StdString String value); // #if LANG_CXX11 // #endif public native void add_s(@Const Pointer value, @Cast("size_t") long size); // repeated int64 i = 3 [packed = true]; public native int i_size(); public native void clear_i(); @MemberGetter public static native int kIFieldNumber(); public static final int kIFieldNumber = kIFieldNumber(); public native @Cast("google::protobuf::int64") long i(int index); public native void set_i(int index, @Cast("google::protobuf::int64") long value); public native void add_i(@Cast("google::protobuf::int64") long value); // repeated float f = 4 [packed = true]; public native int f_size(); public native void clear_f(); @MemberGetter public static native int kFFieldNumber(); public static final int kFFieldNumber = kFFieldNumber(); public native float f(int index); public native void set_f(int index, float value); public native void add_f(float value); // repeated bool b = 5 [packed = true]; public native int b_size(); public native void clear_b(); @MemberGetter public static native int kBFieldNumber(); public static final int kBFieldNumber = kBFieldNumber(); public native @Cast("bool") boolean b(int index); public native void set_b(int index, @Cast("bool") boolean value); public native void add_b(@Cast("bool") boolean value); // repeated .tensorflow.DataType type = 6 [packed = true]; public native int type_size(); public native void clear_type(); @MemberGetter public static native int kTypeFieldNumber(); public static final int kTypeFieldNumber = kTypeFieldNumber(); public native @Cast("tensorflow::DataType") int type(int index); public native void set_type(int index, @Cast("tensorflow::DataType") int value); public native void add_type(@Cast("tensorflow::DataType") int value); // repeated .tensorflow.TensorShapeProto shape = 7; public native int shape_size(); public native void clear_shape(); @MemberGetter public static native int kShapeFieldNumber(); public static final int kShapeFieldNumber = kShapeFieldNumber(); public native TensorShapeProto mutable_shape(int index); public native @Const @ByRef TensorShapeProto shape(int index); public native TensorShapeProto add_shape(); // repeated .tensorflow.TensorProto tensor = 8; public native int tensor_size(); public native void clear_tensor(); @MemberGetter public static native int kTensorFieldNumber(); public static final int kTensorFieldNumber = kTensorFieldNumber(); public native TensorProto mutable_tensor(int index); public native @Const @ByRef TensorProto tensor(int index); public native TensorProto add_tensor(); // repeated .tensorflow.NameAttrList func = 9; public native int func_size(); public native void clear_func(); @MemberGetter public static native int kFuncFieldNumber(); public static final int kFuncFieldNumber = kFuncFieldNumber(); public native NameAttrList mutable_func(int index); public native @Const @ByRef NameAttrList func(int index); public native NameAttrList add_func(); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class AttrValue extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AttrValue(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public AttrValue(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AttrValue position(long position) { return (AttrValue)super.position(position); } public AttrValue() { super((Pointer)null); allocate(); } private native void allocate(); public AttrValue(@Const @ByRef AttrValue from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef AttrValue from); public native @ByRef @Name("operator =") AttrValue put(@Const @ByRef AttrValue from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef AttrValue default_instance(); /** enum tensorflow::AttrValue::ValueCase */ public static final int kS = 2, kI = 3, kF = 4, kB = 5, kType = 6, kShape = 7, kTensor = 8, kList = 1, kFunc = 10, kPlaceholder = 9, VALUE_NOT_SET = 0; public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const AttrValue internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(AttrValue other); public native void Swap(AttrValue other); // implements Message ---------------------------------------------- public native AttrValue New(); public native AttrValue New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef AttrValue from); public native void MergeFrom(@Const @ByRef AttrValue from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- public native void clear_s(); @MemberGetter public static native int kSFieldNumber(); public static final int kSFieldNumber = kSFieldNumber(); public native @StdString BytePointer s(); public native void set_s(@StdString BytePointer value); public native void set_s(@StdString String value); // #if LANG_CXX11 // #endif public native void set_s(@Const Pointer value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_s(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_s(); public native void set_allocated_s(@StdString @Cast({"char*", "std::string*"}) BytePointer s); public native 
@Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_s(); public native @Deprecated void unsafe_arena_set_allocated_s( @StdString @Cast({"char*", "std::string*"}) BytePointer s); public native void clear_i(); @MemberGetter public static native int kIFieldNumber(); public static final int kIFieldNumber = kIFieldNumber(); public native @Cast("google::protobuf::int64") long i(); public native void set_i(@Cast("google::protobuf::int64") long value); public native void clear_f(); @MemberGetter public static native int kFFieldNumber(); public static final int kFFieldNumber = kFFieldNumber(); public native float f(); public native void set_f(float value); public native void clear_b(); @MemberGetter public static native int kBFieldNumber(); public static final int kBFieldNumber = kBFieldNumber(); public native @Cast("bool") boolean b(); public native void set_b(@Cast("bool") boolean value); public native void clear_type(); @MemberGetter public static native int kTypeFieldNumber(); public static final int kTypeFieldNumber = kTypeFieldNumber(); public native @Cast("tensorflow::DataType") int type(); public native void set_type(@Cast("tensorflow::DataType") int value); // .tensorflow.TensorShapeProto shape = 7; public native @Cast("bool") boolean has_shape(); public native void clear_shape(); @MemberGetter public static native int kShapeFieldNumber(); public static final int kShapeFieldNumber = kShapeFieldNumber(); public native @Const @ByRef TensorShapeProto shape(); public native TensorShapeProto release_shape(); public native TensorShapeProto mutable_shape(); public native void set_allocated_shape(TensorShapeProto shape); public native void unsafe_arena_set_allocated_shape( TensorShapeProto shape); public native TensorShapeProto unsafe_arena_release_shape(); // .tensorflow.TensorProto tensor = 8; public native @Cast("bool") boolean has_tensor(); public native void clear_tensor(); @MemberGetter public static native int kTensorFieldNumber(); public static final int kTensorFieldNumber = kTensorFieldNumber(); public native @Const @ByRef TensorProto tensor(); public native TensorProto release_tensor(); public native TensorProto mutable_tensor(); public native void set_allocated_tensor(TensorProto tensor); public native void unsafe_arena_set_allocated_tensor( TensorProto tensor); public native TensorProto unsafe_arena_release_tensor(); // .tensorflow.AttrValue.ListValue list = 1; public native @Cast("bool") boolean has_list(); public native void clear_list(); @MemberGetter public static native int kListFieldNumber(); public static final int kListFieldNumber = kListFieldNumber(); public native @Const @ByRef AttrValue_ListValue list(); public native AttrValue_ListValue release_list(); public native AttrValue_ListValue mutable_list(); public native void set_allocated_list(AttrValue_ListValue list); public native void unsafe_arena_set_allocated_list( AttrValue_ListValue list); public native AttrValue_ListValue unsafe_arena_release_list(); // .tensorflow.NameAttrList func = 10; public native @Cast("bool") boolean has_func(); public native void clear_func(); @MemberGetter public static native int kFuncFieldNumber(); public static final int kFuncFieldNumber = kFuncFieldNumber(); public native @Const @ByRef NameAttrList func(); public native NameAttrList release_func(); public native NameAttrList mutable_func(); public native void set_allocated_func(NameAttrList func); public native void unsafe_arena_set_allocated_func( NameAttrList func); public native NameAttrList 
unsafe_arena_release_func(); public native void clear_placeholder(); @MemberGetter public static native int kPlaceholderFieldNumber(); public static final int kPlaceholderFieldNumber = kPlaceholderFieldNumber(); public native @StdString BytePointer placeholder(); public native void set_placeholder(@StdString BytePointer value); public native void set_placeholder(@StdString String value); // #if LANG_CXX11 // #endif public native void set_placeholder(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_placeholder(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_placeholder(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_placeholder(); public native void set_allocated_placeholder(@StdString @Cast({"char*", "std::string*"}) BytePointer placeholder); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_placeholder(); public native @Deprecated void unsafe_arena_set_allocated_placeholder( @StdString @Cast({"char*", "std::string*"}) BytePointer placeholder); public native void clear_value(); public native @Cast("tensorflow::AttrValue::ValueCase") int value_case(); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class NameAttrList extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NameAttrList(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public NameAttrList(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public NameAttrList position(long position) { return (NameAttrList)super.position(position); } public NameAttrList() { super((Pointer)null); allocate(); } private native void allocate(); public NameAttrList(@Const @ByRef NameAttrList from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef NameAttrList from); public native @ByRef @Name("operator =") NameAttrList put(@Const @ByRef NameAttrList from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef NameAttrList default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const NameAttrList internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(NameAttrList other); public native void Swap(NameAttrList other); // implements Message ---------------------------------------------- public native NameAttrList New(); public native NameAttrList New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef NameAttrList from); public native void MergeFrom(@Const @ByRef NameAttrList from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native 
@Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // map attr = 2; public native int attr_size(); public native void clear_attr(); @MemberGetter public static native int kAttrFieldNumber(); public static final int kAttrFieldNumber = kAttrFieldNumber(); public native @Const @ByRef StringAttrValueMap attr(); public native StringAttrValueMap mutable_attr(); // string name = 1; public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( @StdString @Cast({"char*", "std::string*"}) BytePointer name); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // AttrValue_ListValue // repeated bytes s = 2; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // repeated int64 i = 3 [packed = true]; // repeated float f = 4 [packed = true]; // repeated bool b = 5 [packed = true]; // repeated .tensorflow.DataType type = 6 [packed = true]; // repeated .tensorflow.TensorShapeProto shape = 7; // repeated .tensorflow.TensorProto tensor = 8; // repeated .tensorflow.NameAttrList func = 9; // ------------------------------------------------------------------- // AttrValue // bytes s = 2; // #if LANG_CXX11 // #endif // int64 i = 3; // float f = 4; // bool b = 5; // .tensorflow.DataType type = 6; // .tensorflow.TensorShapeProto shape = 7; // .tensorflow.TensorProto tensor = 8; // .tensorflow.AttrValue.ListValue list = 1; // .tensorflow.NameAttrList func = 10; // string placeholder = 9; // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // 
------------------------------------------------------------------- // NameAttrList // string name = 1; // #if LANG_CXX11 // #endif // map attr = 2; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto // Parsed from tensorflow/core/framework/node_def.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/node_def.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fnode_5fdef_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fnode_5fdef_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include // #include // #include "tensorflow/core/framework/attr_value.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fnode_5fdef_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2fnode_5fdef_2eproto @Namespace("tensorflow") @Opaque public static class NodeDef_AttrEntry_DoNotUse extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public NodeDef_AttrEntry_DoNotUse() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NodeDef_AttrEntry_DoNotUse(Pointer p) { super(p); } } // namespace tensorflow // namespace protobuf // namespace google // =================================================================== // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class NodeDef extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NodeDef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public NodeDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public NodeDef position(long position) { return (NodeDef)super.position(position); } public NodeDef() { super((Pointer)null); allocate(); } private native void allocate(); public NodeDef(@Const @ByRef NodeDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef NodeDef from); public native @ByRef @Name("operator =") NodeDef put(@Const @ByRef NodeDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef NodeDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const NodeDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(NodeDef other); public native void Swap(NodeDef other); // implements Message ---------------------------------------------- public native NodeDef New(); public native NodeDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef NodeDef from); public native void MergeFrom(@Const @ByRef NodeDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated string input = 3; public native int input_size(); public native void clear_input(); @MemberGetter public static native int kInputFieldNumber(); public static final int kInputFieldNumber = kInputFieldNumber(); public native @StdString BytePointer input(int index); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_input(int index); public native void set_input(int index, @StdString BytePointer value); public native void set_input(int index, @StdString String value); // #if LANG_CXX11 // #endif public native void set_input(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_input(int index, String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_input(); public native void 
add_input(@StdString BytePointer value); public native void add_input(@StdString String value); // #if LANG_CXX11 // #endif public native void add_input(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void add_input(String value, @Cast("size_t") long size); // map attr = 5; public native int attr_size(); public native void clear_attr(); @MemberGetter public static native int kAttrFieldNumber(); public static final int kAttrFieldNumber = kAttrFieldNumber(); public native @Const @ByRef StringAttrValueMap attr(); public native StringAttrValueMap mutable_attr(); // string name = 1; public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( @StdString @Cast({"char*", "std::string*"}) BytePointer name); // string op = 2; public native void clear_op(); @MemberGetter public static native int kOpFieldNumber(); public static final int kOpFieldNumber = kOpFieldNumber(); public native @StdString BytePointer op(); public native void set_op(@StdString BytePointer value); public native void set_op(@StdString String value); // #if LANG_CXX11 // #endif public native void set_op(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_op(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_op(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_op(); public native void set_allocated_op(@StdString @Cast({"char*", "std::string*"}) BytePointer op); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_op(); public native @Deprecated void unsafe_arena_set_allocated_op( @StdString @Cast({"char*", "std::string*"}) BytePointer op); // string device = 4; public native void clear_device(); @MemberGetter public static native int kDeviceFieldNumber(); public static final int kDeviceFieldNumber = kDeviceFieldNumber(); public native @StdString BytePointer device(); public native void set_device(@StdString BytePointer value); public native void set_device(@StdString String value); // #if LANG_CXX11 // #endif public native void set_device(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_device(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device(); public native void set_allocated_device(@StdString @Cast({"char*", "std::string*"}) BytePointer device); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer 
unsafe_arena_release_device(); public native @Deprecated void unsafe_arena_set_allocated_device( @StdString @Cast({"char*", "std::string*"}) BytePointer device); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // ------------------------------------------------------------------- // NodeDef // string name = 1; // #if LANG_CXX11 // #endif // string op = 2; // #if LANG_CXX11 // #endif // repeated string input = 3; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // string device = 4; // #if LANG_CXX11 // #endif // map attr = 5; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fnode_5fdef_2eproto // Parsed from tensorflow/core/framework/api_def.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/api_def.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fapi_5fdef_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fapi_5fdef_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. // #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // #include // #include "tensorflow/core/framework/attr_value.pb.h" // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fapi_5fdef_2eproto // Internal implementation detail -- do not use these members. 
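// Usage sketch for the NodeDef bindings above; this is not generated code, and
// the node, op, and input names are made up for illustration:
//
//     NodeDef node = new NodeDef();
//     node.set_name("my_add");
//     node.set_op("Add");
//     node.add_input("x");
//     node.add_input("y");
//     node.set_device("/device:CPU:0");
//     // per-attribute values are attached via mutable_attr(), a StringAttrValueMap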
// namespace protobuf_tensorflow_2fcore_2fframework_2fapi_5fdef_2eproto // namespace tensorflow // namespace protobuf // namespace google /** enum tensorflow::ApiDef_Visibility */ public static final int ApiDef_Visibility_DEFAULT_VISIBILITY = 0, ApiDef_Visibility_VISIBLE = 1, ApiDef_Visibility_SKIP = 2, ApiDef_Visibility_HIDDEN = 3, ApiDef_Visibility_ApiDef_Visibility_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min, ApiDef_Visibility_ApiDef_Visibility_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max; @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_IsValid(int value); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::ApiDef_Visibility") int ApiDef_Visibility_Visibility_MIN(); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::ApiDef_Visibility") int ApiDef_Visibility_Visibility_MAX(); @Namespace("tensorflow") @MemberGetter public static native int ApiDef_Visibility_Visibility_ARRAYSIZE(); @Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer ApiDef_Visibility_descriptor(); @Namespace("tensorflow") public static native @StdString BytePointer ApiDef_Visibility_Name(@Cast("tensorflow::ApiDef_Visibility") int value); @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_Parse( @StdString BytePointer name, @Cast("tensorflow::ApiDef_Visibility*") IntPointer value); @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_Parse( @StdString String name, @Cast("tensorflow::ApiDef_Visibility*") IntBuffer value); @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_Parse( @StdString BytePointer name, @Cast("tensorflow::ApiDef_Visibility*") int... value); @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_Parse( @StdString String name, @Cast("tensorflow::ApiDef_Visibility*") IntPointer value); @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_Parse( @StdString BytePointer name, @Cast("tensorflow::ApiDef_Visibility*") IntBuffer value); @Namespace("tensorflow") public static native @Cast("bool") boolean ApiDef_Visibility_Parse( @StdString String name, @Cast("tensorflow::ApiDef_Visibility*") int... value); // =================================================================== @Namespace("tensorflow") @NoOffset public static class ApiDef_Endpoint extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApiDef_Endpoint(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/
    public ApiDef_Endpoint(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ApiDef_Endpoint position(long position) { return (ApiDef_Endpoint)super.position(position); }

    public ApiDef_Endpoint() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ApiDef_Endpoint(@Const @ByRef ApiDef_Endpoint from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ApiDef_Endpoint from);
    public native @ByRef @Name("operator =") ApiDef_Endpoint put(@Const @ByRef ApiDef_Endpoint from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ApiDef_Endpoint default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ApiDef_Endpoint internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ApiDef_Endpoint other);
    public native void Swap(ApiDef_Endpoint other);

    // implements Message ----------------------------------------------

    public native ApiDef_Endpoint New();
    public native ApiDef_Endpoint New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ApiDef_Endpoint from);
    public native void MergeFrom(@Const @ByRef ApiDef_Endpoint from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // bool deprecated = 3;
    public native void clear_deprecated();
    @MemberGetter public static native int kDeprecatedFieldNumber();
    public static final int kDeprecatedFieldNumber = kDeprecatedFieldNumber();
    public native @Cast("bool") boolean deprecated();
    public native void set_deprecated(@Cast("bool") boolean value);
}
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class ApiDef_Arg extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ApiDef_Arg(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ApiDef_Arg(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ApiDef_Arg position(long position) { return (ApiDef_Arg)super.position(position); }

    public ApiDef_Arg() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ApiDef_Arg(@Const @ByRef ApiDef_Arg from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ApiDef_Arg from);
    public native @ByRef @Name("operator =") ApiDef_Arg put(@Const @ByRef ApiDef_Arg from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ApiDef_Arg default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ApiDef_Arg internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ApiDef_Arg other);
    public native void Swap(ApiDef_Arg other);

    // implements Message ----------------------------------------------

    public native ApiDef_Arg New();
    public native ApiDef_Arg New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ApiDef_Arg from);
    public native void MergeFrom(@Const @ByRef ApiDef_Arg from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // string rename_to = 2;
    public native void clear_rename_to();
    @MemberGetter public static native int kRenameToFieldNumber();
    public static final int kRenameToFieldNumber = kRenameToFieldNumber();
    public native @StdString BytePointer rename_to();
    public native void set_rename_to(@StdString BytePointer value);
    public native void set_rename_to(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_rename_to(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_rename_to(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_rename_to();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_rename_to();
    public native void set_allocated_rename_to(@StdString @Cast({"char*", "std::string*"}) BytePointer rename_to);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_rename_to();
    public native @Deprecated void unsafe_arena_set_allocated_rename_to(@StdString @Cast({"char*", "std::string*"}) BytePointer rename_to);

    // string description = 3;
    public native void clear_description();
    @MemberGetter public static native int kDescriptionFieldNumber();
    public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
    public native @StdString BytePointer description();
    public native void set_description(@StdString BytePointer value);
    public native void set_description(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
    public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description();
    public native @Deprecated void unsafe_arena_set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
}
// -------------------------------------------------------------------
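// Usage sketch (illustrative only; not part of the generated bindings): the
// @StdString accessors above map C++ std::string getters to BytePointer, so a
// Java String is obtained with BytePointer.getString().
//
//   ApiDef_Endpoint ep = new ApiDef_Endpoint();
//   ep.set_name("MathAdd");               // string name = 1
//   ep.set_deprecated(true);              // bool deprecated = 3
//   String name = ep.name().getString();  // copies the native string into Java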
@Namespace("tensorflow") @NoOffset public static class ApiDef_Attr extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ApiDef_Attr(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ApiDef_Attr(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ApiDef_Attr position(long position) { return (ApiDef_Attr)super.position(position); }

    public ApiDef_Attr() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ApiDef_Attr(@Const @ByRef ApiDef_Attr from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ApiDef_Attr from);
    public native @ByRef @Name("operator =") ApiDef_Attr put(@Const @ByRef ApiDef_Attr from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ApiDef_Attr default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ApiDef_Attr internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ApiDef_Attr other);
    public native void Swap(ApiDef_Attr other);

    // implements Message ----------------------------------------------

    public native ApiDef_Attr New();
    public native ApiDef_Attr New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ApiDef_Attr from);
    public native void MergeFrom(@Const @ByRef ApiDef_Attr from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // string rename_to = 2;
    public native void clear_rename_to();
    @MemberGetter public static native int kRenameToFieldNumber();
    public static final int kRenameToFieldNumber = kRenameToFieldNumber();
    public native @StdString BytePointer rename_to();
    public native void set_rename_to(@StdString BytePointer value);
    public native void set_rename_to(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_rename_to(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_rename_to(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_rename_to();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_rename_to();
    public native void set_allocated_rename_to(@StdString @Cast({"char*", "std::string*"}) BytePointer rename_to);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_rename_to();
    public native @Deprecated void unsafe_arena_set_allocated_rename_to(@StdString @Cast({"char*", "std::string*"}) BytePointer rename_to);

    // string description = 4;
    public native void clear_description();
    @MemberGetter public static native int kDescriptionFieldNumber();
    public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
    public native @StdString BytePointer description();
    public native void set_description(@StdString BytePointer value);
    public native void set_description(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
    public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description();
    public native @Deprecated void unsafe_arena_set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);

    // .tensorflow.AttrValue default_value = 3;
    public native @Cast("bool") boolean has_default_value();
    public native void clear_default_value();
    @MemberGetter public static native int kDefaultValueFieldNumber();
    public static final int kDefaultValueFieldNumber = kDefaultValueFieldNumber();
    public native @Const @ByRef AttrValue default_value();
    public native AttrValue release_default_value();
    public native AttrValue mutable_default_value();
    public native void set_allocated_default_value(AttrValue default_value);
    public native void unsafe_arena_set_allocated_default_value(AttrValue default_value);
    public native AttrValue unsafe_arena_release_default_value();
}
// -------------------------------------------------------------------
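// Usage sketch (illustrative only; not part of the generated bindings):
// default_value is a singular submessage, so it follows the usual protobuf
// presence pattern -- mutable_default_value() allocates it on first access and
// has_default_value() then reports true.
//
//   ApiDef_Attr attr = new ApiDef_Attr();
//   attr.set_name("T");
//   AttrValue dv = attr.mutable_default_value();  // creates the AttrValue lazily
//   boolean present = attr.has_default_value();   // true after the call above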
@Namespace("tensorflow") @NoOffset public static class ApiDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ApiDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ApiDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ApiDef position(long position) { return (ApiDef)super.position(position); }

    public ApiDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ApiDef(@Const @ByRef ApiDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ApiDef from);
    public native @ByRef @Name("operator =") ApiDef put(@Const @ByRef ApiDef from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ApiDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ApiDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ApiDef other);
    public native void Swap(ApiDef other);

    // implements Message ----------------------------------------------

    public native ApiDef New();
    public native ApiDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ApiDef from);
    public native void MergeFrom(@Const @ByRef ApiDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    @MemberGetter public static native @Cast("const tensorflow::ApiDef::Visibility") int DEFAULT_VISIBILITY();
    public static final int DEFAULT_VISIBILITY = DEFAULT_VISIBILITY();
    @MemberGetter public static native @Cast("const tensorflow::ApiDef::Visibility") int VISIBLE();
    public static final int VISIBLE = VISIBLE();
    @MemberGetter public static native @Cast("const tensorflow::ApiDef::Visibility") int SKIP();
    public static final int SKIP = SKIP();
    @MemberGetter public static native @Cast("const tensorflow::ApiDef::Visibility") int HIDDEN();
    public static final int HIDDEN = HIDDEN();
    public static native @Cast("bool") boolean Visibility_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::ApiDef::Visibility") int Visibility_MIN();
    public static final int Visibility_MIN = Visibility_MIN();
    @MemberGetter public static native @Cast("const tensorflow::ApiDef::Visibility") int Visibility_MAX();
    public static final int Visibility_MAX = Visibility_MAX();
    @MemberGetter public static native int Visibility_ARRAYSIZE();
    public static final int Visibility_ARRAYSIZE = Visibility_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer Visibility_descriptor();
    public static native @StdString BytePointer Visibility_Name(@Cast("tensorflow::ApiDef::Visibility") int value);
    public static native @Cast("bool") boolean Visibility_Parse(@StdString BytePointer name, @Cast("tensorflow::ApiDef::Visibility*") IntPointer value);
    public static native @Cast("bool") boolean Visibility_Parse(@StdString String name, @Cast("tensorflow::ApiDef::Visibility*") IntBuffer value);
    public static native @Cast("bool") boolean Visibility_Parse(@StdString BytePointer name, @Cast("tensorflow::ApiDef::Visibility*") int... value);
    public static native @Cast("bool") boolean Visibility_Parse(@StdString String name, @Cast("tensorflow::ApiDef::Visibility*") IntPointer value);
    public static native @Cast("bool") boolean Visibility_Parse(@StdString BytePointer name, @Cast("tensorflow::ApiDef::Visibility*") IntBuffer value);
    public static native @Cast("bool") boolean Visibility_Parse(@StdString String name, @Cast("tensorflow::ApiDef::Visibility*") int... value);

    // accessors -------------------------------------------------------

    // repeated .tensorflow.ApiDef.Endpoint endpoint = 3;
    public native int endpoint_size();
    public native void clear_endpoint();
    @MemberGetter public static native int kEndpointFieldNumber();
    public static final int kEndpointFieldNumber = kEndpointFieldNumber();
    public native ApiDef_Endpoint mutable_endpoint(int index);
    public native @Const @ByRef ApiDef_Endpoint endpoint(int index);
    public native ApiDef_Endpoint add_endpoint();

    // repeated .tensorflow.ApiDef.Arg in_arg = 4;
    public native int in_arg_size();
    public native void clear_in_arg();
    @MemberGetter public static native int kInArgFieldNumber();
    public static final int kInArgFieldNumber = kInArgFieldNumber();
    public native ApiDef_Arg mutable_in_arg(int index);
    public native @Const @ByRef ApiDef_Arg in_arg(int index);
    public native ApiDef_Arg add_in_arg();

    // repeated .tensorflow.ApiDef.Arg out_arg = 5;
    public native int out_arg_size();
    public native void clear_out_arg();
    @MemberGetter public static native int kOutArgFieldNumber();
    public static final int kOutArgFieldNumber = kOutArgFieldNumber();
    public native ApiDef_Arg mutable_out_arg(int index);
    public native @Const @ByRef ApiDef_Arg out_arg(int index);
    public native ApiDef_Arg add_out_arg();

    // repeated .tensorflow.ApiDef.Attr attr = 6;
    public native int attr_size();
    public native void clear_attr();
    @MemberGetter public static native int kAttrFieldNumber();
    public static final int kAttrFieldNumber = kAttrFieldNumber();
    public native ApiDef_Attr mutable_attr(int index);
    public native @Const @ByRef ApiDef_Attr attr(int index);
    public native ApiDef_Attr add_attr();

    // repeated string arg_order = 11;
    public native int arg_order_size();
    public native void clear_arg_order();
    @MemberGetter public static native int kArgOrderFieldNumber();
    public static final int kArgOrderFieldNumber = kArgOrderFieldNumber();
    public native @StdString BytePointer arg_order(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_arg_order(int index);
    public native void set_arg_order(int index, @StdString BytePointer value);
    public native void set_arg_order(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_arg_order(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_arg_order(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_arg_order();
    public native void add_arg_order(@StdString BytePointer value);
    public native void add_arg_order(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_arg_order(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_arg_order(String value, @Cast("size_t") long size);

    // string graph_op_name = 1;
    public native void clear_graph_op_name();
    @MemberGetter public static native int kGraphOpNameFieldNumber();
    public static final int kGraphOpNameFieldNumber = kGraphOpNameFieldNumber();
    public native @StdString BytePointer graph_op_name();
    public native void set_graph_op_name(@StdString BytePointer value);
    public native void set_graph_op_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_graph_op_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_graph_op_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_graph_op_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_graph_op_name();
    public native void set_allocated_graph_op_name(@StdString @Cast({"char*", "std::string*"}) BytePointer graph_op_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_graph_op_name();
    public native @Deprecated void unsafe_arena_set_allocated_graph_op_name(@StdString @Cast({"char*", "std::string*"}) BytePointer graph_op_name);

    // string summary = 7;
    public native void clear_summary();
    @MemberGetter public static native int kSummaryFieldNumber();
    public static final int kSummaryFieldNumber = kSummaryFieldNumber();
    public native @StdString BytePointer summary();
    public native void set_summary(@StdString BytePointer value);
    public native void set_summary(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_summary(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_summary(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_summary();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_summary();
    public native void set_allocated_summary(@StdString @Cast({"char*", "std::string*"}) BytePointer summary);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_summary();
    public native @Deprecated void unsafe_arena_set_allocated_summary(@StdString @Cast({"char*", "std::string*"}) BytePointer summary);

    // string description = 8;
    public native void clear_description();
    @MemberGetter public static native int kDescriptionFieldNumber();
    public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
    public native @StdString BytePointer description();
    public native void set_description(@StdString BytePointer value);
    public native void set_description(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
    public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description();
    public native @Deprecated void unsafe_arena_set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);

    // string description_prefix = 9;
    public native void clear_description_prefix();
    @MemberGetter public static native int kDescriptionPrefixFieldNumber();
    public static final int kDescriptionPrefixFieldNumber = kDescriptionPrefixFieldNumber();
    public native @StdString BytePointer description_prefix();
    public native void set_description_prefix(@StdString BytePointer value);
    public native void set_description_prefix(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description_prefix(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description_prefix(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description_prefix();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description_prefix();
    public native void set_allocated_description_prefix(@StdString @Cast({"char*", "std::string*"}) BytePointer description_prefix);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description_prefix();
    public native @Deprecated void unsafe_arena_set_allocated_description_prefix(@StdString @Cast({"char*", "std::string*"}) BytePointer description_prefix);

    // string description_suffix = 10;
    public native void clear_description_suffix();
    @MemberGetter public static native int kDescriptionSuffixFieldNumber();
    public static final int kDescriptionSuffixFieldNumber = kDescriptionSuffixFieldNumber();
    public native @StdString BytePointer description_suffix();
    public native void set_description_suffix(@StdString BytePointer value);
    public native void set_description_suffix(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description_suffix(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description_suffix(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description_suffix();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description_suffix();
    public native void set_allocated_description_suffix(@StdString @Cast({"char*", "std::string*"}) BytePointer description_suffix);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description_suffix();
    public native @Deprecated void unsafe_arena_set_allocated_description_suffix(@StdString @Cast({"char*", "std::string*"}) BytePointer description_suffix);

    // string deprecation_message = 12;
    public native void clear_deprecation_message();
    @MemberGetter public static native int kDeprecationMessageFieldNumber();
    public static final int kDeprecationMessageFieldNumber = kDeprecationMessageFieldNumber();
    public native @StdString BytePointer deprecation_message();
    public native void set_deprecation_message(@StdString BytePointer value);
    public native void set_deprecation_message(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_deprecation_message(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_deprecation_message(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_deprecation_message();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_deprecation_message();
    public native void set_allocated_deprecation_message(@StdString @Cast({"char*", "std::string*"}) BytePointer deprecation_message);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_deprecation_message();
    public native @Deprecated void unsafe_arena_set_allocated_deprecation_message(@StdString @Cast({"char*", "std::string*"}) BytePointer deprecation_message);

    // .tensorflow.ApiDef.Visibility visibility = 2;
    public native void clear_visibility();
    @MemberGetter public static native int kVisibilityFieldNumber();
    public static final int kVisibilityFieldNumber = kVisibilityFieldNumber();
    public native @Cast("tensorflow::ApiDef_Visibility") int visibility();
    public native void set_visibility(@Cast("tensorflow::ApiDef_Visibility") int value);
}
// -------------------------------------------------------------------
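// Usage sketch (illustrative only; not part of the generated bindings): repeated
// message fields expose _size()/add_/mutable_ accessors, and Visibility values
// are the int constants declared on ApiDef.
//
//   ApiDef def = new ApiDef();
//   def.set_graph_op_name("MatMul");          // string graph_op_name = 1
//   def.set_visibility(ApiDef.VISIBLE);       // .tensorflow.ApiDef.Visibility visibility = 2
//   ApiDef_Endpoint ep = def.add_endpoint();  // appends and returns a mutable element
//   ep.set_name("linalg.matmul");
//   int count = def.endpoint_size();          // 1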
@Namespace("tensorflow") @NoOffset public static class ApiDefs extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ApiDefs(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ApiDefs(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ApiDefs position(long position) { return (ApiDefs)super.position(position); }

    public ApiDefs() { super((Pointer)null); allocate(); }
    private native void allocate();
    public ApiDefs(@Const @ByRef ApiDefs from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef ApiDefs from);
    public native @ByRef @Name("operator =") ApiDefs put(@Const @ByRef ApiDefs from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef ApiDefs default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const ApiDefs internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(ApiDefs other);
    public native void Swap(ApiDefs other);

    // implements Message ----------------------------------------------

    public native ApiDefs New();
    public native ApiDefs New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef ApiDefs from);
    public native void MergeFrom(@Const @ByRef ApiDefs from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.ApiDef op = 1;
    public native int op_size();
    public native void clear_op();
    @MemberGetter public static native int kOpFieldNumber();
    public static final int kOpFieldNumber = kOpFieldNumber();
    public native ApiDef mutable_op(int index);
    public native @Const @ByRef ApiDef op(int index);
    public native ApiDef add_op();
}
// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__
// ApiDef_Endpoint
// string name = 1;
// #if LANG_CXX11
// #endif
// bool deprecated = 3;
// -------------------------------------------------------------------
// ApiDef_Arg
// string name = 1;
// #if LANG_CXX11
// #endif
// string rename_to = 2;
// #if LANG_CXX11
// #endif
// string description = 3;
// #if LANG_CXX11
// #endif
// -------------------------------------------------------------------
// ApiDef_Attr
// string name = 1;
// #if LANG_CXX11
// #endif
// string rename_to = 2;
// #if LANG_CXX11
// #endif
// .tensorflow.AttrValue default_value = 3;
// string description = 4;
// #if LANG_CXX11
// #endif
// -------------------------------------------------------------------
// ApiDef
// string graph_op_name = 1;
// #if LANG_CXX11
// #endif
// string deprecation_message = 12;
// #if LANG_CXX11
// #endif
// .tensorflow.ApiDef.Visibility visibility = 2;
// repeated .tensorflow.ApiDef.Endpoint endpoint = 3;
// repeated .tensorflow.ApiDef.Arg in_arg = 4;
// repeated .tensorflow.ApiDef.Arg out_arg = 5;
// repeated string arg_order = 11;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif
// repeated .tensorflow.ApiDef.Attr attr = 6;
// string summary = 7;
// #if LANG_CXX11
// #endif
// string description = 8;
// #if LANG_CXX11
// #endif
// string description_prefix = 9;
// #if LANG_CXX11
// #endif
// string description_suffix = 10;
// #if LANG_CXX11
// #endif
// -------------------------------------------------------------------
// ApiDefs
// repeated .tensorflow.ApiDef op = 1;
// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif  // __GNUC__
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// namespace protobuf
// namespace google
// @@protoc_insertion_point(global_scope)
// #endif  // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fapi_5fdef_2eproto

// Parsed from tensorflow/core/framework/op_def.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/framework/op_def.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include
// #include "tensorflow/core/framework/attr_value.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto

// Internal implementation detail -- do not use these members.

// namespace protobuf_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================
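// Usage sketch (illustrative only; not part of the generated bindings): ApiDefs,
// defined in the section above, is a plain container over
// "repeated .tensorflow.ApiDef op = 1", iterated by index.
//
//   ApiDefs defs = new ApiDefs();
//   defs.add_op().set_graph_op_name("MatMul");
//   for (int i = 0; i < defs.op_size(); i++) {
//       String opName = defs.op(i).graph_op_name().getString();
//   }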
@Namespace("tensorflow") @NoOffset public static class OpDef_ArgDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpDef_ArgDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpDef_ArgDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpDef_ArgDef position(long position) { return (OpDef_ArgDef)super.position(position); }

    public OpDef_ArgDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public OpDef_ArgDef(@Const @ByRef OpDef_ArgDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef OpDef_ArgDef from);
    public native @ByRef @Name("operator =") OpDef_ArgDef put(@Const @ByRef OpDef_ArgDef from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef OpDef_ArgDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const OpDef_ArgDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(OpDef_ArgDef other);
    public native void Swap(OpDef_ArgDef other);

    // implements Message ----------------------------------------------

    public native OpDef_ArgDef New();
    public native OpDef_ArgDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef OpDef_ArgDef from);
    public native void MergeFrom(@Const @ByRef OpDef_ArgDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // string description = 2;
    public native void clear_description();
    @MemberGetter public static native int kDescriptionFieldNumber();
    public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
    public native @StdString BytePointer description();
    public native void set_description(@StdString BytePointer value);
    public native void set_description(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
    public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description();
    public native @Deprecated void unsafe_arena_set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);

    // string type_attr = 4;
    public native void clear_type_attr();
    @MemberGetter public static native int kTypeAttrFieldNumber();
    public static final int kTypeAttrFieldNumber = kTypeAttrFieldNumber();
    public native @StdString BytePointer type_attr();
    public native void set_type_attr(@StdString BytePointer value);
    public native void set_type_attr(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_type_attr(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_type_attr(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type_attr();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type_attr();
    public native void set_allocated_type_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer type_attr);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_type_attr();
    public native @Deprecated void unsafe_arena_set_allocated_type_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer type_attr);

    // string number_attr = 5;
    public native void clear_number_attr();
    @MemberGetter public static native int kNumberAttrFieldNumber();
    public static final int kNumberAttrFieldNumber = kNumberAttrFieldNumber();
    public native @StdString BytePointer number_attr();
    public native void set_number_attr(@StdString BytePointer value);
    public native void set_number_attr(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_number_attr(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_number_attr(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_number_attr();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_number_attr();
    public native void set_allocated_number_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer number_attr);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_number_attr();
    public native @Deprecated void unsafe_arena_set_allocated_number_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer number_attr);

    // string type_list_attr = 6;
    public native void clear_type_list_attr();
    @MemberGetter public static native int kTypeListAttrFieldNumber();
    public static final int kTypeListAttrFieldNumber = kTypeListAttrFieldNumber();
    public native @StdString BytePointer type_list_attr();
    public native void set_type_list_attr(@StdString BytePointer value);
    public native void set_type_list_attr(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_type_list_attr(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_type_list_attr(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type_list_attr();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type_list_attr();
    public native void set_allocated_type_list_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer type_list_attr);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_type_list_attr();
    public native @Deprecated void unsafe_arena_set_allocated_type_list_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer type_list_attr);

    // .tensorflow.DataType type = 3;
    public native void clear_type();
    @MemberGetter public static native int kTypeFieldNumber();
    public static final int kTypeFieldNumber = kTypeFieldNumber();
    public native @Cast("tensorflow::DataType") int type();
    public native void set_type(@Cast("tensorflow::DataType") int value);

    // bool is_ref = 16;
    public native void clear_is_ref();
    @MemberGetter public static native int kIsRefFieldNumber();
    public static final int kIsRefFieldNumber = kIsRefFieldNumber();
    public native @Cast("bool") boolean is_ref();
    public native void set_is_ref(@Cast("bool") boolean value);
}
// -------------------------------------------------------------------
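// Usage sketch (illustrative only; not part of the generated bindings): an
// OpDef_ArgDef either names a concrete DataType via set_type(...) or refers to
// an attribute via set_type_attr(...); DT_FLOAT below stands in for one of the
// DataType int constants defined elsewhere in these bindings.
//
//   OpDef_ArgDef arg = new OpDef_ArgDef();
//   arg.set_name("x");
//   arg.set_type(DT_FLOAT);         // .tensorflow.DataType type = 3
//   // or: arg.set_type_attr("T");  // string type_attr = 4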
@Namespace("tensorflow") @NoOffset public static class OpDef_AttrDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpDef_AttrDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpDef_AttrDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpDef_AttrDef position(long position) { return (OpDef_AttrDef)super.position(position); }

    public OpDef_AttrDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public OpDef_AttrDef(@Const @ByRef OpDef_AttrDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef OpDef_AttrDef from);
    public native @ByRef @Name("operator =") OpDef_AttrDef put(@Const @ByRef OpDef_AttrDef from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef OpDef_AttrDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const OpDef_AttrDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(OpDef_AttrDef other);
    public native void Swap(OpDef_AttrDef other);

    // implements Message ----------------------------------------------

    public native OpDef_AttrDef New();
    public native OpDef_AttrDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef OpDef_AttrDef from);
    public native void MergeFrom(@Const @ByRef OpDef_AttrDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // string type = 2;
    public native void clear_type();
    @MemberGetter public static native int kTypeFieldNumber();
    public static final int kTypeFieldNumber = kTypeFieldNumber();
    public native @StdString BytePointer type();
    public native void set_type(@StdString BytePointer value);
    public native void set_type(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_type(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type();
    public native void set_allocated_type(@StdString @Cast({"char*", "std::string*"}) BytePointer type);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_type();
    public native @Deprecated void unsafe_arena_set_allocated_type(@StdString @Cast({"char*", "std::string*"}) BytePointer type);

    // string description = 4;
    public native void clear_description();
    @MemberGetter public static native int kDescriptionFieldNumber();
    public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
    public native @StdString BytePointer description();
    public native void set_description(@StdString BytePointer value);
    public native void set_description(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
    public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description();
    public native @Deprecated void unsafe_arena_set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);

    // .tensorflow.AttrValue default_value = 3;
    public native @Cast("bool") boolean has_default_value();
    public native void clear_default_value();
    @MemberGetter public static native int kDefaultValueFieldNumber();
    public static final int kDefaultValueFieldNumber = kDefaultValueFieldNumber();
    public native @Const @ByRef AttrValue default_value();
    public native AttrValue release_default_value();
    public native AttrValue mutable_default_value();
    public native void set_allocated_default_value(AttrValue default_value);
    public native void unsafe_arena_set_allocated_default_value(AttrValue default_value);
    public native AttrValue unsafe_arena_release_default_value();

    // .tensorflow.AttrValue allowed_values = 7;
    public native @Cast("bool") boolean has_allowed_values();
    public native void clear_allowed_values();
    @MemberGetter public static native int kAllowedValuesFieldNumber();
    public static final int kAllowedValuesFieldNumber = kAllowedValuesFieldNumber();
    public native @Const @ByRef AttrValue allowed_values();
    public native AttrValue release_allowed_values();
    public native AttrValue mutable_allowed_values();
    public native void set_allocated_allowed_values(AttrValue allowed_values);
    public native void unsafe_arena_set_allocated_allowed_values(AttrValue allowed_values);
    public native AttrValue unsafe_arena_release_allowed_values();

    // int64 minimum = 6;
    public native void clear_minimum();
    @MemberGetter public static native int kMinimumFieldNumber();
    public static final int kMinimumFieldNumber = kMinimumFieldNumber();
    public native @Cast("google::protobuf::int64") long minimum();
    public native void set_minimum(@Cast("google::protobuf::int64") long value);

    // bool has_minimum = 5;
    public native void clear_has_minimum();
    @MemberGetter public static native int kHasMinimumFieldNumber();
    public static final int kHasMinimumFieldNumber = kHasMinimumFieldNumber();
    public native @Cast("bool") boolean has_minimum();
    public native void set_has_minimum(@Cast("bool") boolean value);
}
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class OpDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpDef position(long position) { return (OpDef)super.position(position); }

    public OpDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public OpDef(@Const @ByRef OpDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef OpDef from);
    public native @ByRef @Name("operator =") OpDef put(@Const @ByRef OpDef from);
    // #if LANG_CXX11
    // #endif
    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef OpDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const OpDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(OpDef other);
    public native void Swap(OpDef other);

    // implements Message ----------------------------------------------

    public native OpDef New();
    public native OpDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef OpDef from);
    public native void MergeFrom(@Const @ByRef OpDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.OpDef.ArgDef input_arg = 2;
    public native int input_arg_size();
    public native void clear_input_arg();
    @MemberGetter public static native int kInputArgFieldNumber();
    public static final int kInputArgFieldNumber = kInputArgFieldNumber();
    public native OpDef_ArgDef mutable_input_arg(int index);
    public native @Const @ByRef OpDef_ArgDef input_arg(int index);
    public native OpDef_ArgDef add_input_arg();

    // repeated .tensorflow.OpDef.ArgDef output_arg = 3;
    public native int output_arg_size();
    public native void clear_output_arg();
    @MemberGetter public static native int kOutputArgFieldNumber();
    public static final int kOutputArgFieldNumber = kOutputArgFieldNumber();
    public native OpDef_ArgDef mutable_output_arg(int index);
    public native @Const @ByRef OpDef_ArgDef output_arg(int index);
    public native OpDef_ArgDef add_output_arg();

    // repeated .tensorflow.OpDef.AttrDef attr = 4;
    public native int attr_size();
    public native void clear_attr();
    @MemberGetter public static native int kAttrFieldNumber();
    public static final int kAttrFieldNumber = kAttrFieldNumber();
    public native OpDef_AttrDef mutable_attr(int index);
    public native @Const @ByRef OpDef_AttrDef attr(int index);
    public native OpDef_AttrDef add_attr();

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // string summary = 5;
    public native void clear_summary();
    @MemberGetter public static native int kSummaryFieldNumber();
    public static final int kSummaryFieldNumber = kSummaryFieldNumber();
    public native @StdString BytePointer summary();
    public native void set_summary(@StdString BytePointer value);
    public native void set_summary(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_summary(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_summary(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_summary();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_summary();
    public native void set_allocated_summary(@StdString @Cast({"char*", "std::string*"}) BytePointer summary);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_summary();
    public native @Deprecated void unsafe_arena_set_allocated_summary(@StdString @Cast({"char*", "std::string*"}) BytePointer summary);

    // string description = 6;
    public native void clear_description();
    @MemberGetter public static native int kDescriptionFieldNumber();
    public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
    public native @StdString BytePointer description();
    public native void set_description(@StdString BytePointer value);
    public native void set_description(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_description(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
    public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_description();
    public native @Deprecated void unsafe_arena_set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);

    // .tensorflow.OpDeprecation deprecation = 8;
    public native @Cast("bool") boolean has_deprecation();
    public native void clear_deprecation();
    @MemberGetter public static native int kDeprecationFieldNumber();
    public static final int kDeprecationFieldNumber = kDeprecationFieldNumber();
    public native @Const @ByRef OpDeprecation deprecation();
    public native OpDeprecation release_deprecation();
    public native OpDeprecation mutable_deprecation();
    public native void set_allocated_deprecation(OpDeprecation deprecation);
    public native void unsafe_arena_set_allocated_deprecation(OpDeprecation deprecation);
    public native OpDeprecation unsafe_arena_release_deprecation();

    // bool is_commutative = 18;
    public native void clear_is_commutative();
    @MemberGetter public static native int kIsCommutativeFieldNumber();
    public static final int kIsCommutativeFieldNumber = kIsCommutativeFieldNumber();
    public native @Cast("bool") boolean is_commutative();
    public native void set_is_commutative(@Cast("bool") boolean value);

    // bool is_aggregate = 16;
    public native void clear_is_aggregate();
    @MemberGetter public static native int kIsAggregateFieldNumber();
    public static final int kIsAggregateFieldNumber = kIsAggregateFieldNumber();
    public native @Cast("bool") boolean is_aggregate();
    public native void set_is_aggregate(@Cast("bool") boolean value);

    // bool is_stateful = 17;
    public native void clear_is_stateful();
    @MemberGetter public static native int kIsStatefulFieldNumber();
    public static final int kIsStatefulFieldNumber = kIsStatefulFieldNumber();
    public native @Cast("bool") boolean is_stateful();
    public native void set_is_stateful(@Cast("bool") boolean value);

    // bool allows_uninitialized_input = 19;
    public native void clear_allows_uninitialized_input();
    @MemberGetter public static native int kAllowsUninitializedInputFieldNumber();
    public static final int kAllowsUninitializedInputFieldNumber = kAllowsUninitializedInputFieldNumber();
    public native @Cast("bool") boolean allows_uninitialized_input();
    public native void set_allows_uninitialized_input(@Cast("bool") boolean value);
}
// 
------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class OpDeprecation extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OpDeprecation(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public OpDeprecation(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public OpDeprecation position(long position) { return (OpDeprecation)super.position(position); } public OpDeprecation() { super((Pointer)null); allocate(); } private native void allocate(); public OpDeprecation(@Const @ByRef OpDeprecation from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef OpDeprecation from); public native @ByRef @Name("operator =") OpDeprecation put(@Const @ByRef OpDeprecation from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef OpDeprecation default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const OpDeprecation internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(OpDeprecation other); public native void Swap(OpDeprecation other); // implements Message ---------------------------------------------- public native OpDeprecation New(); public native OpDeprecation New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef OpDeprecation from); public native void MergeFrom(@Const @ByRef OpDeprecation from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string explanation = 2; public native void clear_explanation(); @MemberGetter public static native int kExplanationFieldNumber(); public static final int kExplanationFieldNumber = kExplanationFieldNumber(); public native @StdString BytePointer explanation(); public native void set_explanation(@StdString BytePointer value); public native void 
set_explanation(@StdString String value); // #if LANG_CXX11 // #endif public native void set_explanation(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_explanation(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_explanation(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_explanation(); public native void set_allocated_explanation(@StdString @Cast({"char*", "std::string*"}) BytePointer explanation); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_explanation(); public native @Deprecated void unsafe_arena_set_allocated_explanation( @StdString @Cast({"char*", "std::string*"}) BytePointer explanation); // int32 version = 1; public native void clear_version(); @MemberGetter public static native int kVersionFieldNumber(); public static final int kVersionFieldNumber = kVersionFieldNumber(); public native @Cast("google::protobuf::int32") int version(); public native void set_version(@Cast("google::protobuf::int32") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class OpList extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OpList(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public OpList(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public OpList position(long position) { return (OpList)super.position(position); } public OpList() { super((Pointer)null); allocate(); } private native void allocate(); public OpList(@Const @ByRef OpList from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef OpList from); public native @ByRef @Name("operator =") OpList put(@Const @ByRef OpList from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef OpList default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const OpList internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(OpList other); public native void Swap(OpList other); // implements Message ---------------------------------------------- public native OpList New(); public native OpList New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef OpList from); public native void MergeFrom(@Const @ByRef OpList from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") 
BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(
        @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();
    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.OpDef op = 1;
    public native int op_size();
    public native void clear_op();
    @MemberGetter public static native int kOpFieldNumber();
    public static final int kOpFieldNumber = kOpFieldNumber();
    public native OpDef mutable_op(int index);
    public native @Const @ByRef OpDef op(int index);
    public native OpDef add_op();
}

// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif // __GNUC__

// OpDef_ArgDef

// string name = 1;
// #if LANG_CXX11
// #endif
// string description = 2;
// #if LANG_CXX11
// #endif
// .tensorflow.DataType type = 3;
// string type_attr = 4;
// #if LANG_CXX11
// #endif
// string number_attr = 5;
// #if LANG_CXX11
// #endif
// string type_list_attr = 6;
// #if LANG_CXX11
// #endif
// bool is_ref = 16;

// -------------------------------------------------------------------

// OpDef_AttrDef

// string name = 1;
// #if LANG_CXX11
// #endif
// string type = 2;
// #if LANG_CXX11
// #endif
// .tensorflow.AttrValue default_value = 3;
// string description = 4;
// #if LANG_CXX11
// #endif
// bool has_minimum = 5;
// int64 minimum = 6;
// .tensorflow.AttrValue allowed_values = 7;

// -------------------------------------------------------------------

// OpDef

// string name = 1;
// #if LANG_CXX11
// #endif
// repeated .tensorflow.OpDef.ArgDef input_arg = 2;
// repeated .tensorflow.OpDef.ArgDef output_arg = 3;
// repeated .tensorflow.OpDef.AttrDef attr = 4;
// .tensorflow.OpDeprecation deprecation = 8;
// string summary = 5;
// #if LANG_CXX11
// #endif
// string description = 6;
// #if LANG_CXX11
// #endif
// bool is_commutative = 18;
// bool is_aggregate = 16;
// bool is_stateful = 17;
// bool allows_uninitialized_input = 19;

// -------------------------------------------------------------------

// OpDeprecation

// int32 version = 1;
// string explanation = 2;
// #if LANG_CXX11
// #endif

// -------------------------------------------------------------------

// OpList

// repeated .tensorflow.OpDef op = 1;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif // __GNUC__

// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)

// namespace tensorflow

// @@protoc_insertion_point(global_scope)

// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto
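// A minimal usage sketch (not generated code) showing how the OpDef, OpDef_AttrDef
// and OpList accessors above compose. The op name "MyExampleOp" and attribute "N"
// are hypothetical, and OpDef_ArgDef.set_name is assumed from its generated
// `string name = 1` accessor declared earlier in this file.
public static OpDef buildExampleOpDef() {
    OpDef op = new OpDef();
    op.set_name("MyExampleOp");                 // string name = 1
    op.add_input_arg().set_name("x");           // repeated ArgDef input_arg = 2
    op.add_output_arg().set_name("y");          // repeated ArgDef output_arg = 3
    OpDef_AttrDef attr = op.add_attr();         // repeated AttrDef attr = 4
    attr.set_name("N");
    attr.set_type("int");                       // string type = 2
    attr.set_has_minimum(true);                 // bool has_minimum = 5
    attr.set_minimum(1);                        // int64 minimum = 6
    op.set_is_stateful(false);                  // bool is_stateful = 17
    return op;
}

// OpList is just `repeated OpDef op = 1`; CopyFrom() deep-copies a message.
public static OpList buildExampleOpList() {
    OpList ops = new OpList();
    ops.add_op().CopyFrom(buildExampleOpDef());
    assert ops.op_size() == 1;
    return ops;
}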
// Parsed from tensorflow/core/framework/function.pb.h

// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/function.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ffunction_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ffunction_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include // IWYU pragma: export
// #include // IWYU pragma: export
// #include // IWYU pragma: export
// #include
// #include
// #include
// #include "tensorflow/core/framework/attr_value.pb.h"
// #include "tensorflow/core/framework/node_def.pb.h"
// #include "tensorflow/core/framework/op_def.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2ffunction_2eproto

// Internal implementation detail -- do not use these members.

// namespace protobuf_tensorflow_2fcore_2fframework_2ffunction_2eproto

@Namespace("tensorflow") @Opaque public static class FunctionDef_AttrEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public FunctionDef_AttrEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FunctionDef_AttrEntry_DoNotUse(Pointer p) { super(p); }
}

@Namespace("tensorflow") @Opaque public static class FunctionDef_RetEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public FunctionDef_RetEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FunctionDef_RetEntry_DoNotUse(Pointer p) { super(p); }
}

// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class FunctionDefLibrary extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FunctionDefLibrary(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ public FunctionDefLibrary(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public FunctionDefLibrary position(long position) { return (FunctionDefLibrary)super.position(position); } public FunctionDefLibrary() { super((Pointer)null); allocate(); } private native void allocate(); public FunctionDefLibrary(@Const @ByRef FunctionDefLibrary from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef FunctionDefLibrary from); public native @ByRef @Name("operator =") FunctionDefLibrary put(@Const @ByRef FunctionDefLibrary from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef FunctionDefLibrary default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const FunctionDefLibrary internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(FunctionDefLibrary other); public native void Swap(FunctionDefLibrary other); // implements Message ---------------------------------------------- public native FunctionDefLibrary New(); public native FunctionDefLibrary New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef FunctionDefLibrary from); public native void MergeFrom(@Const @ByRef FunctionDefLibrary from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.FunctionDef function = 1; public native int function_size(); public native void clear_function(); @MemberGetter public static native int kFunctionFieldNumber(); public static final int kFunctionFieldNumber = kFunctionFieldNumber(); public native FunctionDef mutable_function(int index); public native @Const @ByRef FunctionDef function(int index); public native FunctionDef add_function(); // repeated .tensorflow.GradientDef gradient = 2; public native int gradient_size(); public native void clear_gradient(); @MemberGetter public static native int kGradientFieldNumber(); public static final int kGradientFieldNumber = 
kGradientFieldNumber(); public native GradientDef mutable_gradient(int index); public native @Const @ByRef GradientDef gradient(int index); public native GradientDef add_gradient(); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class FunctionDef extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionDef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public FunctionDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public FunctionDef position(long position) { return (FunctionDef)super.position(position); } public FunctionDef() { super((Pointer)null); allocate(); } private native void allocate(); public FunctionDef(@Const @ByRef FunctionDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef FunctionDef from); public native @ByRef @Name("operator =") FunctionDef put(@Const @ByRef FunctionDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef FunctionDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const FunctionDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(FunctionDef other); public native void Swap(FunctionDef other); // implements Message ---------------------------------------------- public native FunctionDef New(); public native FunctionDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef FunctionDef from); public native void MergeFrom(@Const @ByRef FunctionDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.NodeDef node_def = 3; public native int 
node_def_size();
    public native void clear_node_def();
    @MemberGetter public static native int kNodeDefFieldNumber();
    public static final int kNodeDefFieldNumber = kNodeDefFieldNumber();
    public native NodeDef mutable_node_def(int index);
    public native @Const @ByRef NodeDef node_def(int index);
    public native NodeDef add_node_def();

    // map<string, string> ret = 4;
    public native int ret_size();
    public native void clear_ret();
    @MemberGetter public static native int kRetFieldNumber();
    public static final int kRetFieldNumber = kRetFieldNumber();
    public native @Const @ByRef StringStringMap ret();
    public native StringStringMap mutable_ret();

    // map<string, .tensorflow.AttrValue> attr = 5;
    public native int attr_size();
    public native void clear_attr();
    @MemberGetter public static native int kAttrFieldNumber();
    public static final int kAttrFieldNumber = kAttrFieldNumber();
    public native @Const @ByRef StringAttrValueMap attr();
    public native StringAttrValueMap mutable_attr();

    // .tensorflow.OpDef signature = 1;
    public native @Cast("bool") boolean has_signature();
    public native void clear_signature();
    @MemberGetter public static native int kSignatureFieldNumber();
    public static final int kSignatureFieldNumber = kSignatureFieldNumber();
    public native @Const @ByRef OpDef signature();
    public native OpDef release_signature();
    public native OpDef mutable_signature();
    public native void set_allocated_signature(OpDef signature);
    public native void unsafe_arena_set_allocated_signature(
        OpDef signature);
    public native OpDef unsafe_arena_release_signature();
}

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class GradientDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GradientDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ public GradientDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public GradientDef position(long position) { return (GradientDef)super.position(position); } public GradientDef() { super((Pointer)null); allocate(); } private native void allocate(); public GradientDef(@Const @ByRef GradientDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef GradientDef from); public native @ByRef @Name("operator =") GradientDef put(@Const @ByRef GradientDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef GradientDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const GradientDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(GradientDef other); public native void Swap(GradientDef other); // implements Message ---------------------------------------------- public native GradientDef New(); public native GradientDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef GradientDef from); public native void MergeFrom(@Const @ByRef GradientDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string function_name = 1; public native void clear_function_name(); @MemberGetter public static native int kFunctionNameFieldNumber(); public static final int kFunctionNameFieldNumber = kFunctionNameFieldNumber(); public native @StdString BytePointer function_name(); public native void set_function_name(@StdString BytePointer value); public native void set_function_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_function_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_function_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_function_name(); public native @StdString @Cast({"char*", "std::string*"}) 
BytePointer release_function_name();
    public native void set_allocated_function_name(@StdString @Cast({"char*", "std::string*"}) BytePointer function_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_function_name();
    public native @Deprecated void unsafe_arena_set_allocated_function_name(
        @StdString @Cast({"char*", "std::string*"}) BytePointer function_name);

    // string gradient_func = 2;
    public native void clear_gradient_func();
    @MemberGetter public static native int kGradientFuncFieldNumber();
    public static final int kGradientFuncFieldNumber = kGradientFuncFieldNumber();
    public native @StdString BytePointer gradient_func();
    public native void set_gradient_func(@StdString BytePointer value);
    public native void set_gradient_func(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_gradient_func(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_gradient_func(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_gradient_func();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_gradient_func();
    public native void set_allocated_gradient_func(@StdString @Cast({"char*", "std::string*"}) BytePointer gradient_func);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_gradient_func();
    public native @Deprecated void unsafe_arena_set_allocated_gradient_func(
        @StdString @Cast({"char*", "std::string*"}) BytePointer gradient_func);
}

// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif // __GNUC__

// FunctionDefLibrary

// repeated .tensorflow.FunctionDef function = 1;
// repeated .tensorflow.GradientDef gradient = 2;

// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------

// FunctionDef

// .tensorflow.OpDef signature = 1;
// map<string, .tensorflow.AttrValue> attr = 5;
// repeated .tensorflow.NodeDef node_def = 3;
// map<string, string> ret = 4;

// -------------------------------------------------------------------

// GradientDef

// string function_name = 1;
// #if LANG_CXX11
// #endif
// string gradient_func = 2;
// #if LANG_CXX11
// #endif

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif // __GNUC__

// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)

// namespace tensorflow

// @@protoc_insertion_point(global_scope)

// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ffunction_2eproto
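// A minimal usage sketch (not generated code) for the function messages above:
// a FunctionDef pairs an OpDef signature with NodeDef body nodes, and a
// FunctionDefLibrary aggregates functions plus gradient registrations. The
// names "MyFunc"/"MyFuncGrad" are hypothetical; the NodeDef setters are the
// generated accessors from node_def.pb.h, parsed earlier in this file.
public static FunctionDefLibrary buildExampleFunctionLibrary() {
    FunctionDefLibrary lib = new FunctionDefLibrary();

    FunctionDef fn = lib.add_function();        // repeated FunctionDef function = 1
    fn.mutable_signature().set_name("MyFunc");  // OpDef signature = 1, created lazily
    NodeDef body = fn.add_node_def();           // repeated NodeDef node_def = 3
    body.set_name("identity");
    body.set_op("Identity");

    GradientDef grad = lib.add_gradient();      // repeated GradientDef gradient = 2
    grad.set_function_name("MyFunc");           // string function_name = 1
    grad.set_gradient_func("MyFuncGrad");       // string gradient_func = 2
    return lib;
}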
// Parsed from tensorflow/core/framework/graph.pb.h

// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/graph.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fgraph_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fgraph_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include // IWYU pragma: export
// #include // IWYU pragma: export
// #include
// #include "tensorflow/core/framework/node_def.pb.h"
// #include "tensorflow/core/framework/function.pb.h"
// #include "tensorflow/core/framework/versions.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fgraph_2eproto

// Internal implementation detail -- do not use these members.

// namespace protobuf_tensorflow_2fcore_2fframework_2fgraph_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class GraphDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GraphDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public GraphDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public GraphDef position(long position) { return (GraphDef)super.position(position); }

    public GraphDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public GraphDef(@Const @ByRef GraphDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef GraphDef from);

    public native @ByRef @Name("operator =") GraphDef put(@Const @ByRef GraphDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef GraphDef default_instance();
    public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY
    public static native @Const GraphDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();
    public native void UnsafeArenaSwap(GraphDef other);
    public native void Swap(GraphDef other);

    // implements Message ----------------------------------------------

    public native GraphDef New();
    public native GraphDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef GraphDef from);
    public native void MergeFrom(@Const @ByRef GraphDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();
    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(
CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.NodeDef node = 1; public native int node_size(); public native void clear_node(); @MemberGetter public static native int kNodeFieldNumber(); public static final int kNodeFieldNumber = kNodeFieldNumber(); public native NodeDef mutable_node(int index); public native @Const @ByRef NodeDef node(int index); public native NodeDef add_node(); // .tensorflow.FunctionDefLibrary library = 2; public native @Cast("bool") boolean has_library(); public native void clear_library(); @MemberGetter public static native int kLibraryFieldNumber(); public static final int kLibraryFieldNumber = kLibraryFieldNumber(); public native @Const @ByRef FunctionDefLibrary library(); public native FunctionDefLibrary release_library(); public native FunctionDefLibrary mutable_library(); public native void set_allocated_library(FunctionDefLibrary library); public native void unsafe_arena_set_allocated_library( FunctionDefLibrary library); public native FunctionDefLibrary unsafe_arena_release_library(); // .tensorflow.VersionDef versions = 4; public native @Cast("bool") boolean has_versions(); public native void clear_versions(); @MemberGetter public static native int kVersionsFieldNumber(); public static final int kVersionsFieldNumber = kVersionsFieldNumber(); public native @Const @ByRef VersionDef versions(); public native VersionDef release_versions(); public native VersionDef mutable_versions(); public native void set_allocated_versions(VersionDef versions); public native void unsafe_arena_set_allocated_versions( VersionDef versions); public native VersionDef unsafe_arena_release_versions(); // int32 version = 3 [deprecated = true]; public native @Deprecated void clear_version(); @MemberGetter public static native @Deprecated int kVersionFieldNumber(); public static final int kVersionFieldNumber = kVersionFieldNumber(); public native @Cast("google::protobuf::int32") @Deprecated int version(); public native @Deprecated void set_version(@Cast("google::protobuf::int32") int value); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // GraphDef // repeated .tensorflow.NodeDef node = 1; // .tensorflow.VersionDef versions = 4; // int32 version = 3 [deprecated = true]; // .tensorflow.FunctionDefLibrary library = 2; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // 
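// A minimal usage sketch (not generated code): GraphDef combines `node`, a
// FunctionDefLibrary and a VersionDef; message fields follow the usual
// has_*/mutable_*/set_allocated_* pattern, and mutable_library() creates the
// submessage on first access. Reuses buildExampleFunctionLibrary() from the
// sketch above.
public static GraphDef buildExampleGraphDef() {
    GraphDef graph = new GraphDef();
    NodeDef node = graph.add_node();            // repeated NodeDef node = 1
    node.set_name("c");
    node.set_op("Const");
    graph.mutable_library().CopyFrom(buildExampleFunctionLibrary());
    assert graph.has_library();                 // set by the mutable_ access above
    return graph;
}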
// @@protoc_insertion_point(global_scope)

// #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fgraph_2eproto

// Parsed from tensorflow/core/framework/session_state.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_SESSION_STATE_H_
// #define TENSORFLOW_CORE_FRAMEWORK_SESSION_STATE_H_

// #include
// #include
// #include

// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/platform/mutex.h"

// The session state remembers the tensors we choose to keep across
// multiple run calls.
@Namespace("tensorflow") public static class SessionState extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public SessionState() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public SessionState(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SessionState(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public SessionState position(long position) { return (SessionState)super.position(position); }

    // Get a tensor from the session state.
    public native @ByVal Status GetTensor(@StdString BytePointer handle, Tensor tensor);
    public native @ByVal Status GetTensor(@StdString String handle, Tensor tensor);

    // Store a tensor in the session state.
    public native @ByVal Status AddTensor(@StdString BytePointer handle, @Const @ByRef Tensor tensor);
    public native @ByVal Status AddTensor(@StdString String handle, @Const @ByRef Tensor tensor);

    // Delete a tensor from the session state.
    public native @ByVal Status DeleteTensor(@StdString BytePointer handle);
    public native @ByVal Status DeleteTensor(@StdString String handle);

    public native @Cast("tensorflow::int64") long GetNewId();

    @MemberGetter public static native @Cast("const char*") BytePointer kTensorHandleResourceTypeName();
}

// The tensor store remembers the tensors we choose to keep for the
// current run call. It is available to every op kernel.
@Namespace("tensorflow") public static class TensorStore extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TensorStore() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TensorStore(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorStore(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TensorStore position(long position) { return (TensorStore)super.position(position); }

    public static class TensorAndKey extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public TensorAndKey() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public TensorAndKey(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public TensorAndKey(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public TensorAndKey position(long position) { return (TensorAndKey)super.position(position); }

        public native @ByRef Tensor tensor(); public native TensorAndKey tensor(Tensor tensor);
        public native @Cast("tensorflow::int64") long id(); public native TensorAndKey id(long id);
        public native @StdString BytePointer device_name(); public native TensorAndKey device_name(BytePointer device_name);

        public native @StdString BytePointer GetHandle(@StdString BytePointer tensor_name);
        public native @StdString String GetHandle(@StdString String tensor_name);
    }

    // Add the named tensor to the tensor store for this run.
    public native @ByVal Status AddTensor(@StdString BytePointer name, @Const @ByRef TensorAndKey tk);
    public native @ByVal Status AddTensor(@StdString String name, @Const @ByRef TensorAndKey tk);

    // Save the tensors in the tensor store of this run to the session.
    public native @ByVal Status SaveTensors(@Const @ByRef StringVector output_names, SessionState session_state);

    // Returns true if no tensors have been added to this store.
    public native @Cast("bool") boolean empty();
}
// namespace tensorflow

// #endif // TENSORFLOW_CORE_FRAMEWORK_SESSION_STATE_H_
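// A minimal usage sketch (not generated code) for SessionState: tensors are
// kept across Run() calls under caller-chosen string handles ("my_handle" is
// hypothetical); each call returns a Status whose ok() reports success.
public static void exampleSessionState(SessionState state, Tensor t) {
    Status s = state.AddTensor("my_handle", t);     // keep t alive in the session
    if (s.ok()) {
        Tensor fetched = new Tensor();
        s = state.GetTensor("my_handle", fetched);  // read it back
    }
    state.DeleteTensor("my_handle");                // release it again
    long freshId = state.GetNewId();                // unique id for new handles
}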
// Parsed from tensorflow/core/framework/types.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_TYPES_H_
// #define TENSORFLOW_CORE_FRAMEWORK_TYPES_H_

// #include
// #include
// #include

// #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// Disable clang-format to prevent 'FixedPoint' header from being included
// before 'Tensor' header on which it depends.
// clang-format off
// #include "third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint"
// clang-format on
// #include "tensorflow/core/framework/bfloat16.h"
// #include "tensorflow/core/framework/numeric_types.h"
// #include "tensorflow/core/framework/resource_handle.h"
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/types.h"

// MemoryType is used to describe whether input or output Tensors of
// an OpKernel should reside in "Host memory" (e.g., CPU memory) or
// "Device" Memory (CPU memory for CPU devices, GPU memory for GPU
// devices).
/** enum tensorflow::MemoryType */
public static final int
    DEVICE_MEMORY = 0,
    HOST_MEMORY = 1;

// A DeviceType is just a string, but we wrap it up in a class to give
// some type checking as we're passing these around
@Namespace("tensorflow") @NoOffset public static class DeviceType extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceType(Pointer p) { super(p); }

    public DeviceType(@Cast("const char*") BytePointer type) { super((Pointer)null); allocate(type); }
    private native void allocate(@Cast("const char*") BytePointer type);
    public DeviceType(String type) { super((Pointer)null); allocate(type); }
    private native void allocate(String type);

    public native @Cast("const char*") BytePointer type();
    public native @StdString BytePointer type_string();

    public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef DeviceType other);
    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef DeviceType other);
    public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef DeviceType other);
}
@Namespace("tensorflow") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef DeviceType d);

// Convenient constants that can be passed to a DeviceType constructor
@Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer DEVICE_CPU();  // "CPU"
@Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer DEVICE_GPU();  // "GPU"
@Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer DEVICE_SYCL();  // "SYCL"

@Name("tensorflow::DeviceName") public static class DeviceName extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public DeviceName() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public DeviceName(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceName(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public DeviceName position(long position) { return (DeviceName)super.position(position); }

    @MemberGetter public static native @StdString BytePointer value();
}

// #if GOOGLE_CUDA
// #endif // GOOGLE_CUDA

// #ifdef TENSORFLOW_USE_SYCL
// #endif // TENSORFLOW_USE_SYCL

// Convert the enums to strings for errors:
@Namespace("tensorflow") public static native @StdString BytePointer DataTypeString(@Cast("tensorflow::DataType") int dtype);
@Namespace("tensorflow") public static native @StdString BytePointer DeviceTypeString(@Const @ByRef DeviceType device_type);
@Namespace("tensorflow") public static native @StdString BytePointer DataTypeSliceString(@ByVal @Cast("const tensorflow::DataTypeSlice*") DataTypeVector dtypes);
@Namespace("tensorflow") public static native @StdString BytePointer DataTypeVectorString(@Const @ByRef DataTypeVector dtypes);

// DataTypeSet represents a set of DataType values as a simple and efficient
// bit mask. Note that DataTypeSet cannot represent all DataType values; it
// cannot represent any of the DT_*_REF values.
@Namespace("tensorflow") @NoOffset public static class DataTypeSet extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/
    public DataTypeSet(Pointer p) { super(p); }
    public DataTypeSet(@Const @ByRef DataTypeSet other) { super((Pointer)null); allocate(other); }
    private native void allocate(@Const @ByRef DataTypeSet other);
    public DataTypeSet(int mask) { super((Pointer)null); allocate(mask); }
    private native void allocate(int mask);

    public native @Cast("const bool") boolean Contains(@Cast("tensorflow::DataType") int dt);

    @NoOffset public static class Iterator extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Iterator(Pointer p) { super(p); }

        public Iterator(@Const @ByRef DataTypeSet set, int pos) { super((Pointer)null); allocate(set, pos); }
        private native void allocate(@Const @ByRef DataTypeSet set, int pos);

        public native @Cast("tensorflow::DataType") @Name("operator *") int multiply();
        public native @ByRef @Name("operator ++") Iterator increment();
        public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Iterator other);
        public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Iterator other);
        public native @Cast("size_t") @Name("operator -") long subtract(@Const @ByRef Iterator other);
    }

    public static native int ctz_uint32(int x);
    public static native int clz_uint32(int x);

    public native @ByVal Iterator begin();
    public native @ByVal Iterator end();

    public native @Cast("size_t") long size();

    public native @Const @ByVal @Name("operator |") DataTypeSet or(@Const @ByRef DataTypeSet other);
}

// If "sp" names a valid type, store it in "*dt" and return true. Otherwise,
// return false.
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeFromString(@StringPiece BytePointer sp, @Cast("tensorflow::DataType*") IntPointer dt);
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeFromString(@StringPiece String sp, @Cast("tensorflow::DataType*") IntPointer dt);

@Namespace("tensorflow") public static native @Const @ByVal DataTypeSet ToSet(@Cast("tensorflow::DataType") int dt);

// DT_FLOAT + kDataTypeRefOffset == DT_FLOAT_REF, etc.
/** enum tensorflow:: */
public static final int kDataTypeRefOffset = 100;
@Namespace("tensorflow") public static native @Cast("bool") boolean IsRefType(@Cast("tensorflow::DataType") int dtype);
@Namespace("tensorflow") public static native @Cast("tensorflow::DataType") int MakeRefType(@Cast("tensorflow::DataType") int dtype);
@Namespace("tensorflow") public static native @Cast("tensorflow::DataType") int RemoveRefType(@Cast("tensorflow::DataType") int dtype);
@Namespace("tensorflow") public static native @Cast("tensorflow::DataType") int BaseType(@Cast("tensorflow::DataType") int dtype);

// Returns true if the actual type is the same as or ref of the expected type.
@Namespace("tensorflow") public static native @Cast("bool") boolean TypesCompatible(@Cast("tensorflow::DataType") int expected, @Cast("tensorflow::DataType") int actual);

// Does not include _ref types.
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kAllTypes();

@Namespace("tensorflow") public static native @Const @ByRef DataTypeSet AllTypes();

// #if !defined(IS_MOBILE_PLATFORM) || defined(SUPPORT_SELECTIVE_REGISTRATION)

// Types that support '<' and '>'.
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kRealNumberTypes();
@Namespace("tensorflow") public static native @Const @ByVal DataTypeSet RealNumberTypes();
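// A minimal usage sketch (not generated code) for the type utilities above.
// DT_FLOAT comes from types.pb.h, parsed earlier in this file; a ref type is
// its base type shifted by kDataTypeRefOffset (100).
public static void exampleDataTypeQueries() {
    int ref = MakeRefType(DT_FLOAT);                        // DT_FLOAT_REF
    assert IsRefType(ref) && RemoveRefType(ref) == DT_FLOAT;

    // DataTypeSet is a bit mask; its Iterator walks the set bits.
    DataTypeSet real = RealNumberTypes();                   // types supporting '<' and '>'
    for (DataTypeSet.Iterator it = real.begin(); it.notEquals(real.end()); it.increment()) {
        String name = DataTypeString(it.multiply()).getString();
    }
}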
// Return the list of all numeric types.
// Includes complex and quantized types.
// NOTE: On Android, we only include the float and int32 types for now.
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kNumberTypes();

@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kQuantizedTypes();

// Types that support '<' and '>', including quantized types.
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kRealAndQuantizedTypes();

// #elif defined(__ANDROID_TYPES_FULL__)
// #else   // defined(IS_MOBILE_PLATFORM) && !defined(__ANDROID_TYPES_FULL__)
// #endif  // defined(IS_MOBILE_PLATFORM)

// Validates type T for whether it is a supported DataType.
// DataTypeToEnum<T>::v() and DataTypeToEnum<T>::value are the DataType
// constants for T, e.g. DataTypeToEnum<float>::v() is DT_FLOAT.
// Specializations below

// EnumToDataType<VALUE>::Type is the type for DataType constant VALUE, e.g.
// EnumToDataType<DT_FLOAT>::Type is float.
// Specializations below

// Template specialization for both DataTypeToEnum and EnumToDataType.
// #define MATCH_TYPE_AND_ENUM(TYPE, ENUM)
//   template <>
//   struct DataTypeToEnum<TYPE> {
//     static DataType v() { return ENUM; }
//     static DataType ref() { return MakeRefType(ENUM); }
//     static constexpr DataType value = ENUM;
//   };
//   template <>
//   struct IsValidDataType<TYPE> {
//     static constexpr bool value = true;
//   };
//   template <>
//   struct EnumToDataType<ENUM> {
//     typedef TYPE Type;
//   }
@Name("tensorflow::DataTypeToEnum") public static class DataTypeToEnum extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public DataTypeToEnum() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public DataTypeToEnum(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DataTypeToEnum(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public DataTypeToEnum position(long position) {
        return (DataTypeToEnum)super.position(position);
    }

    public static native @Cast("tensorflow::DataType") int v();
    public static native @Cast("tensorflow::DataType") int ref();
    @MemberGetter public static native @Cast("const tensorflow::DataType") int value();
    public static final int value = value();
}

@Name("tensorflow::IsValidDataType") public static class IsValidDataType extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public IsValidDataType() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public IsValidDataType(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IsValidDataType(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public IsValidDataType position(long position) {
        return (IsValidDataType)super.position(position);
    }

    @MemberGetter public static native @Cast("const bool") boolean value();
    public static final boolean value = value();
}

@Name("tensorflow::EnumToDataType") public static class EnumToDataType extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public EnumToDataType() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public EnumToDataType(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EnumToDataType(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public EnumToDataType position(long position) {
        return (EnumToDataType)super.position(position);
    }
}

// #undef MATCH_TYPE_AND_ENUM

// All types not specialized are marked invalid.

// Extra validity checking; not part of public API.

// TODO(jeff): Maybe unify this with Tensor::CanUseDMA, or the underlying
// is_simple in tensor.cc (and possibly choose a more general name?)
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kDataTypesCanUseMemcpy();
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeCanUseMemcpy(@Cast("tensorflow::DataType") int dt);

// Returns true iff 'dt' is a real, non-quantized floating point type.
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kDataTypeIsFloating();
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeIsFloating(@Cast("tensorflow::DataType") int dt);

// Returns true iff 'dt' is a complex type.
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kDataTypeIsComplex();
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeIsComplex(@Cast("tensorflow::DataType") int dt);

@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeIsQuantized(@Cast("tensorflow::DataType") int dt);

// Is the dtype nonquantized integral?
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kDataTypeIsInteger();
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeIsInteger(@Cast("tensorflow::DataType") int dt);

// Is the dtype a signed integral type?
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kDataTypeIsSigned();
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeIsSigned(@Cast("tensorflow::DataType") int dt);

// Is the dtype an unsigned integral type?
@Namespace("tensorflow") @MemberGetter public static native @Const @ByRef DataTypeSet kDataTypeIsUnsigned();
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeIsUnsigned(@Cast("tensorflow::DataType") int dt);

// Returns a 0 on failure
@Namespace("tensorflow") public static native int DataTypeSize(@Cast("tensorflow::DataType") int dt);

// Returns HOST_MEMORY if `dtype` is always on host or is a DT_INT32,
// DEVICE_MEMORY otherwise.
@Namespace("tensorflow") public static native @Cast("tensorflow::MemoryType") int MTypeFromDType(@Cast("const tensorflow::DataType") int dtype);

// Types that always sit on host: DT_STRING, DT_STRING_REF, DT_RESOURCE.
// For DT_RESOURCE, the handle always sits on host (even if the underlying
// object has device-allocated resources).
@Namespace("tensorflow") public static native @Cast("bool") boolean DataTypeAlwaysOnHost(@Cast("tensorflow::DataType") int dt);

// namespace tensorflow

// #endif  // TENSORFLOW_CORE_FRAMEWORK_TYPES_H_
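// Editor's usage sketch (not generated code): combining the dtype predicates,
// size, and memory-placement helpers above. DT_FLOAT, DT_INT32, and DT_STRING
// are the DataType enum constants mapped elsewhere in this class.
public static void dtypeIntrospectionDemo() {
    System.out.println(DataTypeSize(DT_FLOAT));                   // 4 bytes
    System.out.println(DataTypeSize(DT_STRING));                  // 0: not a fixed-size type
    System.out.println(DataTypeCanUseMemcpy(DT_FLOAT));           // true: plain old data
    // DT_INT32 is kept in host memory (shape-like metadata); strings always are:
    System.out.println(MTypeFromDType(DT_INT32) == HOST_MEMORY);  // true
    System.out.println(DataTypeAlwaysOnHost(DT_STRING));          // true
}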
// Parsed from tensorflow/core/framework/control_flow.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_CONTROL_FLOW_H_
// #define TENSORFLOW_CORE_FRAMEWORK_CONTROL_FLOW_H_

// #include "tensorflow/core/lib/hash/hash.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/types.h"

@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::uint64") long kIllegalFrameId();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int64") long kIllegalIterId();

// For the purpose of control flow, every tensor produced by TensorFlow is
// conceptually tagged by a 'FrameAndIter'. FrameAndIter consists of a
// 'frame_id' and an 'iter_id'. The tensor value it represents is produced
// in the frame with frame_id at the iteration of iter_id.
@Namespace("tensorflow") @NoOffset public static class FrameAndIter extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FrameAndIter(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public FrameAndIter(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public FrameAndIter position(long position) {
        return (FrameAndIter)super.position(position);
    }

    public native @Cast("tensorflow::uint64") long frame_id(); public native FrameAndIter frame_id(long frame_id);
    public native @Cast("tensorflow::int64") long iter_id(); public native FrameAndIter iter_id(long iter_id);

    public FrameAndIter() { super((Pointer)null); allocate(); }
    private native void allocate();
    public FrameAndIter(@Cast("tensorflow::uint64") long frame, @Cast("tensorflow::int64") long iter) { super((Pointer)null); allocate(frame, iter); }
    private native void allocate(@Cast("tensorflow::uint64") long frame, @Cast("tensorflow::int64") long iter);

    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef FrameAndIter other);
}

@Namespace("tensorflow") public static class FrameAndIterHash extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public FrameAndIterHash() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public FrameAndIterHash(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FrameAndIterHash(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public FrameAndIterHash position(long position) {
        return (FrameAndIterHash)super.position(position);
    }

    public native @Cast("size_t") @Name("operator ()") long apply(@Const @ByRef FrameAndIter key);
}

// namespace tensorflow

// #endif  // TENSORFLOW_CORE_FRAMEWORK_CONTROL_FLOW_H_
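// Editor's usage sketch (not generated code): FrameAndIter tags a tensor with
// its control-flow frame and iteration, and FrameAndIterHash exposes the C++
// hash functor. The ids below are arbitrary illustrative values.
public static void frameAndIterDemo() {
    FrameAndIter root = new FrameAndIter(0, 0);        // frame 0, iteration 0
    FrameAndIter same = new FrameAndIter(0, 0);
    System.out.println(root.equals(same));             // true: operator== is mapped
    long bucket = new FrameAndIterHash().apply(root);  // hash, as used by unordered containers
    System.out.println(bucket);
}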
// Parsed from tensorflow/core/framework/kernel_def.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/framework/kernel_def.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fkernel_5fdef_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fkernel_5fdef_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include
// #include "tensorflow/core/framework/attr_value.pb.h"
// @@protoc_insertion_point(includes)

// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fkernel_5fdef_2eproto

// Internal implementation detail -- do not use these members.

// namespace protobuf_tensorflow_2fcore_2fframework_2fkernel_5fdef_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class KernelDef_AttrConstraint extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public KernelDef_AttrConstraint(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public KernelDef_AttrConstraint(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public KernelDef_AttrConstraint position(long position) {
        return (KernelDef_AttrConstraint)super.position(position);
    }

    public KernelDef_AttrConstraint() { super((Pointer)null); allocate(); }
    private native void allocate();
    public KernelDef_AttrConstraint(@Const @ByRef KernelDef_AttrConstraint from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef KernelDef_AttrConstraint from);

    public native @ByRef @Name("operator =") KernelDef_AttrConstraint put(@Const @ByRef KernelDef_AttrConstraint from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef KernelDef_AttrConstraint default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const KernelDef_AttrConstraint internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(KernelDef_AttrConstraint other);
    public native void Swap(KernelDef_AttrConstraint other);

    // implements Message ----------------------------------------------

    public native KernelDef_AttrConstraint New();
    public native KernelDef_AttrConstraint New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef KernelDef_AttrConstraint from);
    public native void MergeFrom(@Const @ByRef KernelDef_AttrConstraint from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // string name = 1;
    public native void clear_name();
    @MemberGetter public static native int kNameFieldNumber();
    public static final int kNameFieldNumber = kNameFieldNumber();
    public native @StdString BytePointer name();
    public native void set_name(@StdString BytePointer value);
    public native void set_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
    public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name();
    public native @Deprecated void unsafe_arena_set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);

    // .tensorflow.AttrValue allowed_values = 2;
    public native @Cast("bool") boolean has_allowed_values();
    public native void clear_allowed_values();
    @MemberGetter public static native int kAllowedValuesFieldNumber();
    public static final int kAllowedValuesFieldNumber = kAllowedValuesFieldNumber();
    public native @Const @ByRef AttrValue allowed_values();
    public native AttrValue release_allowed_values();
    public native AttrValue mutable_allowed_values();
    public native void set_allocated_allowed_values(AttrValue allowed_values);
    public native void unsafe_arena_set_allocated_allowed_values(AttrValue allowed_values);
    public native AttrValue unsafe_arena_release_allowed_values();
}

// -------------------------------------------------------------------
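// Editor's usage sketch (not generated code): an AttrConstraint pins an op
// attr to specific values via the nested AttrValue message. mutable_list()
// and add_type() are assumed from the attr_value.pb.h bindings mapped
// elsewhere in this class; verify against your version of the preset.
public static KernelDef_AttrConstraint floatOnlyConstraint() {
    KernelDef_AttrConstraint c = new KernelDef_AttrConstraint();
    c.set_name("T");  // constrain attr "T"
    c.mutable_allowed_values().mutable_list().add_type(DT_FLOAT);  // assumed API
    return c;
}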
@Namespace("tensorflow") @NoOffset public static class KernelDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public KernelDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public KernelDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public KernelDef position(long position) {
        return (KernelDef)super.position(position);
    }

    public KernelDef() { super((Pointer)null); allocate(); }
    private native void allocate();
    public KernelDef(@Const @ByRef KernelDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef KernelDef from);

    public native @ByRef @Name("operator =") KernelDef put(@Const @ByRef KernelDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef KernelDef default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const KernelDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(KernelDef other);
    public native void Swap(KernelDef other);

    // implements Message ----------------------------------------------

    public native KernelDef New();
    public native KernelDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef KernelDef from);
    public native void MergeFrom(@Const @ByRef KernelDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.KernelDef.AttrConstraint constraint = 3;
    public native int constraint_size();
    public native void clear_constraint();
    @MemberGetter public static native int kConstraintFieldNumber();
    public static final int kConstraintFieldNumber = kConstraintFieldNumber();
    public native KernelDef_AttrConstraint mutable_constraint(int index);
    public native @Const @ByRef KernelDef_AttrConstraint constraint(int index);
    public native KernelDef_AttrConstraint add_constraint();

    // repeated string host_memory_arg = 4;
    public native int host_memory_arg_size();
    public native void clear_host_memory_arg();
    @MemberGetter public static native int kHostMemoryArgFieldNumber();
    public static final int kHostMemoryArgFieldNumber = kHostMemoryArgFieldNumber();
    public native @StdString BytePointer host_memory_arg(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_host_memory_arg(int index);
    public native void set_host_memory_arg(int index, @StdString BytePointer value);
    public native void set_host_memory_arg(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_host_memory_arg(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_host_memory_arg(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_host_memory_arg();
    public native void add_host_memory_arg(@StdString BytePointer value);
    public native void add_host_memory_arg(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_host_memory_arg(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_host_memory_arg(String value, @Cast("size_t") long size);

    // string op = 1;
    public native void clear_op();
    @MemberGetter public static native int kOpFieldNumber();
    public static final int kOpFieldNumber = kOpFieldNumber();
    public native @StdString BytePointer op();
    public native void set_op(@StdString BytePointer value);
    public native void set_op(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_op(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_op(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_op();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_op();
    public native void set_allocated_op(@StdString @Cast({"char*", "std::string*"}) BytePointer op);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_op();
    public native @Deprecated void unsafe_arena_set_allocated_op(@StdString @Cast({"char*", "std::string*"}) BytePointer op);

    // string device_type = 2;
    public native void clear_device_type();
    @MemberGetter public static native int kDeviceTypeFieldNumber();
    public static final int kDeviceTypeFieldNumber = kDeviceTypeFieldNumber();
    public native @StdString BytePointer device_type();
    public native void set_device_type(@StdString BytePointer value);
    public native void set_device_type(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_device_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_device_type(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device_type();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device_type();
    public native void set_allocated_device_type(@StdString @Cast({"char*", "std::string*"}) BytePointer device_type);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_device_type();
    public native @Deprecated void unsafe_arena_set_allocated_device_type(@StdString @Cast({"char*", "std::string*"}) BytePointer device_type);

    // string label = 5;
    public native void clear_label();
    @MemberGetter public static native int kLabelFieldNumber();
    public static final int kLabelFieldNumber = kLabelFieldNumber();
    public native @StdString BytePointer label();
    public native void set_label(@StdString BytePointer value);
    public native void set_label(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_label(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_label(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_label();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_label();
    public native void set_allocated_label(@StdString @Cast({"char*", "std::string*"}) BytePointer label);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_label();
    public native @Deprecated void unsafe_arena_set_allocated_label(@StdString @Cast({"char*", "std::string*"}) BytePointer label);
}

// -------------------------------------------------------------------
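// Editor's usage sketch (not generated code): populating a KernelDef proto
// from Java. "MyOp" and the field values are illustrative, not a real
// registered op.
public static KernelDef exampleKernelDef() {
    KernelDef kd = new KernelDef();
    kd.set_op("MyOp");                  // string op = 1
    kd.set_device_type("CPU");          // string device_type = 2
    kd.add_host_memory_arg("shape");    // repeated string host_memory_arg = 4
    kd.add_constraint().set_name("T");  // repeated AttrConstraint constraint = 3
    return kd;                          // kd.constraint_size() == 1
}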
@Namespace("tensorflow") @NoOffset public static class KernelList extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public KernelList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public KernelList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public KernelList position(long position) {
        return (KernelList)super.position(position);
    }

    public KernelList() { super((Pointer)null); allocate(); }
    private native void allocate();
    public KernelList(@Const @ByRef KernelList from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef KernelList from);

    public native @ByRef @Name("operator =") KernelList put(@Const @ByRef KernelList from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef KernelList default_instance();
    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const KernelList internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(KernelList other);
    public native void Swap(KernelList other);

    // implements Message ----------------------------------------------

    public native KernelList New();
    public native KernelList New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef KernelList from);
    public native void MergeFrom(@Const @ByRef KernelList from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .tensorflow.KernelDef kernel = 1;
    public native int kernel_size();
    public native void clear_kernel();
    @MemberGetter public static native int kKernelFieldNumber();
    public static final int kKernelFieldNumber = kKernelFieldNumber();
    public native KernelDef mutable_kernel(int index);
    public native @Const @ByRef KernelDef kernel(int index);
    public native KernelDef add_kernel();
}

// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__

// KernelDef_AttrConstraint

// string name = 1;
// #if LANG_CXX11
// #endif

// .tensorflow.AttrValue allowed_values = 2;

// -------------------------------------------------------------------

// KernelDef

// string op = 1;
// #if LANG_CXX11
// #endif

// string device_type = 2;
// #if LANG_CXX11
// #endif

// repeated .tensorflow.KernelDef.AttrConstraint constraint = 3;

// repeated string host_memory_arg = 4;
// #if LANG_CXX11
// #endif
// #if LANG_CXX11
// #endif

// string label = 5;
// #if LANG_CXX11
// #endif

// -------------------------------------------------------------------

// KernelList

// repeated .tensorflow.KernelDef kernel = 1;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif  // __GNUC__

// -------------------------------------------------------------------

// -------------------------------------------------------------------

// @@protoc_insertion_point(namespace_scope)

// namespace tensorflow

// @@protoc_insertion_point(global_scope)

// #endif  // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fkernel_5fdef_2eproto
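// Editor's usage sketch (not generated code): KernelList simply aggregates
// KernelDef messages, e.g. for reporting the kernels known to a process.
public static KernelList exampleKernelList() {
    KernelList list = new KernelList();
    KernelDef kd = list.add_kernel();  // repeated .tensorflow.KernelDef kernel = 1
    kd.set_op("MyOp");                 // illustrative op name
    kd.set_device_type("CPU");
    return list;                       // list.kernel_size() == 1
}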
// Parsed from tensorflow/core/framework/kernel_def_builder.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_KERNEL_DEF_BUILDER_H_
// #define TENSORFLOW_CORE_FRAMEWORK_KERNEL_DEF_BUILDER_H_

// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"

// Forward declare proto so that kernels don't need to depend on it

// Builder class passed to the REGISTER_KERNEL_BUILDER() macro.
@Namespace("tensorflow") @NoOffset public static class KernelDefBuilder extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public KernelDefBuilder(Pointer p) { super(p); }

    // Starts with just the name field set.
    // Caller MUST call Build() and take ownership of the result.
    public KernelDefBuilder(@Cast("const char*") BytePointer op_name) { super((Pointer)null); allocate(op_name); }
    private native void allocate(@Cast("const char*") BytePointer op_name);
    public KernelDefBuilder(String op_name) { super((Pointer)null); allocate(op_name); }
    private native void allocate(String op_name);

    // Required: specify the type of device this kernel supports.
    // Returns *this.
    public native @ByRef KernelDefBuilder Device(@Cast("const char*") BytePointer device_type);
    public native @ByRef KernelDefBuilder Device(String device_type);
    //  KernelDefBuilder& Device(DeviceType device_type);

    // Specify that this kernel supports a limited set of values for a
    // particular type or list(type) attr (a further restriction than
    // what the Op allows).
    // Returns *this.
    public native @ByRef KernelDefBuilder TypeConstraint(@Cast("const char*") BytePointer attr_name, @Cast("tensorflow::DataType*") @ArraySlice IntPointer allowed);
    public native @ByRef KernelDefBuilder TypeConstraint(String attr_name, @Cast("tensorflow::DataType*") @ArraySlice IntPointer allowed);

    // Like TypeConstraint but supports just a single type.
    public native @ByRef KernelDefBuilder TypeConstraint(@Cast("const char*") BytePointer attr_name, @Cast("tensorflow::DataType") int allowed);
    public native @ByRef KernelDefBuilder TypeConstraint(String attr_name, @Cast("tensorflow::DataType") int allowed);

    // Like TypeConstraint, but (a) gets the type from a template parameter
    // and (b) only supports a constraint to a single type.
    // TODO(josh11b): Support other types of attr constraints as needed.

    // Specify that this kernel requires/provides an input/output arg
    // in host memory (instead of the default, device memory).
    // Returns *this.
    public native @ByRef KernelDefBuilder HostMemory(@Cast("const char*") BytePointer arg_name);
    public native @ByRef KernelDefBuilder HostMemory(String arg_name);

    // Specify that this kernel requires a particular value for the
    // "_kernel" attr.  May only be specified once.  Returns *this.
    public native @ByRef KernelDefBuilder Label(@Cast("const char*") BytePointer label);
    public native @ByRef KernelDefBuilder Label(String label);

    // Returns a pointer to a KernelDef with fields set based on the
    // above calls to this instance.
    // Caller takes ownership of the result.
    public native @Const KernelDef Build();
}

// IMPLEMENTATION

// namespace tensorflow

// #endif  // TENSORFLOW_CORE_FRAMEWORK_KERNEL_DEF_BUILDER_H_
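// Editor's usage sketch (not generated code): chaining the builder the same
// way the C++ REGISTER_KERNEL_BUILDER macro does. "MyOp" and the attr/arg
// names are illustrative; per the comment above, the caller owns the
// returned KernelDef.
public static KernelDef buildCpuKernelDef() {
    return new KernelDefBuilder("MyOp")
            .Device(DEVICE_CPU())           // required device type
            .TypeConstraint("T", DT_FLOAT)  // restrict attr "T" to float
            .HostMemory("shape")            // keep arg "shape" in host memory
            .Label("custom")                // optional "_kernel" attr value
            .Build();
}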
// Parsed from tensorflow/core/framework/tracking_allocator.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_TRACKING_ALLOCATOR_H_
// #define TENSORFLOW_CORE_FRAMEWORK_TRACKING_ALLOCATOR_H_

// #include
// #include "tensorflow/core/framework/allocator.h"
// #include "tensorflow/core/lib/core/refcount.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/thread_annotations.h"
// #include "tensorflow/core/platform/types.h"

// TrackingAllocator is a wrapper for an Allocator. It keeps a running
// count of the number of bytes allocated through the wrapper. It is
// used by the Executor to "charge" allocations to particular Op
// executions. Each Op gets a separate TrackingAllocator wrapper
// around the underlying allocator.
//
// The implementation assumes the invariant that all calls to
// AllocateRaw by an Op (or work items spawned by the Op) will occur
// before the Op's Compute method returns. Thus the high watermark is
// established once Compute returns.
//
// DeallocateRaw can be called long after the Op has finished,
// e.g. when an output tensor is deallocated, and the wrapper cannot
// be deleted until the last of these calls has occurred. The
// TrackingAllocator keeps track of outstanding calls using a
// reference count, and deletes itself once the last call has been
// received and the high watermark has been retrieved.
@Namespace("tensorflow") @NoOffset public static class AllocRecord extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AllocRecord(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public AllocRecord(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public AllocRecord position(long position) {
        return (AllocRecord)super.position(position);
    }

    public AllocRecord(@Cast("tensorflow::int64") long a_bytes, @Cast("tensorflow::int64") long a_micros) { super((Pointer)null); allocate(a_bytes, a_micros); }
    private native void allocate(@Cast("tensorflow::int64") long a_bytes, @Cast("tensorflow::int64") long a_micros);
    public AllocRecord() { super((Pointer)null); allocate(); }
    private native void allocate();

    public native @Cast("tensorflow::int64") long alloc_bytes(); public native AllocRecord alloc_bytes(long alloc_bytes);
    public native @Cast("tensorflow::int64") long alloc_micros(); public native AllocRecord alloc_micros(long alloc_micros);
}
@Namespace("tensorflow") @NoOffset public static class TrackingAllocator extends Allocator {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TrackingAllocator(Pointer p) { super(p); }

    public native @StdString BytePointer Name();
    public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes);
    public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes, @Const @ByRef AllocationAttributes allocation_attr);
    public native void DeallocateRaw(Pointer ptr);
    public native @Cast("bool") boolean TracksAllocationSizes();
    public native @Cast("size_t") long RequestedSize(@Const Pointer ptr);
    public native @Cast("size_t") long AllocatedSize(@Const Pointer ptr);
    public native @Cast("tensorflow::int64") long AllocationId(@Const Pointer ptr);
    public native void GetStats(AllocatorStats stats);
    public native void ClearStats();

    // If the underlying allocator tracks allocation sizes, this returns
    // a tuple where the first value is the total number of bytes
    // allocated through this wrapper, the second value is the high
    // watermark of bytes allocated through this wrapper and the third value is
    // the allocated bytes through this wrapper that are still alive. If the
    // underlying allocator does not track allocation sizes the first
    // value is the total number of bytes requested through this wrapper
    // and the second and the third are 0.
    //
    //  public native @ByVal @Cast("std::tuple*") SizeTPointer GetSizes();

    // After GetRecordsAndUnRef is called, the only further calls allowed
    // on this wrapper are calls to DeallocateRaw with pointers that
    // were allocated by this wrapper and have not yet been
    // deallocated. After this call completes and all allocated pointers
    // have been deallocated the wrapper will delete itself.
    public native @ByVal AllocRecordVector GetRecordsAndUnRef();

    // Returns a copy of allocation records collected so far.
    public native @ByVal AllocRecordVector GetCurrentRecords();
}

// end namespace tensorflow

// #endif  // TENSORFLOW_CORE_FRAMEWORK_TRACKING_ALLOCATOR_H_
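// Editor's usage sketch (not generated code): these bindings expose no public
// TrackingAllocator constructor, so assume 'tracker' was obtained from the
// runtime (the Executor wraps each Op's allocator, per the comment above).
// Draining the per-Op allocation records then looks like this:
public static void dumpAllocRecords(TrackingAllocator tracker) {
    AllocRecordVector records = tracker.GetCurrentRecords();  // copy; keeps the wrapper alive
    for (long i = 0; i < records.size(); i++) {
        AllocRecord r = records.get(i);
        System.out.println(r.alloc_bytes() + " bytes at t=" + r.alloc_micros() + "us");
    }
}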
// Parsed from tensorflow/core/framework/op_kernel.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_KERNEL_H_
// #define TENSORFLOW_CORE_FRAMEWORK_OP_KERNEL_H_

// #include
// #include
// #include

// #include "tensorflow/core/framework/allocator.h"
// #include "tensorflow/core/framework/cancellation.h"
// #include "tensorflow/core/framework/control_flow.h"
// #include "tensorflow/core/framework/device_base.h"
// #include "tensorflow/core/framework/kernel_def.pb.h"
// #include "tensorflow/core/framework/kernel_def_builder.h"
// #include "tensorflow/core/framework/node_def_util.h"
// #include "tensorflow/core/framework/op.h"  // TODO(b/62899350): Remove
// #include "tensorflow/core/framework/rendezvous.h"
// #include "tensorflow/core/framework/selective_registration.h"
// #include "tensorflow/core/framework/session_state.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/framework/tensor_shape.h"
// #include "tensorflow/core/framework/tensor_shape.pb.h"  // TODO(b/62899350): Remove
// #include "tensorflow/core/framework/tracking_allocator.h"
// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/framework/unique_tensor_references.h"
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"
// #include "tensorflow/core/lib/gtl/manual_constructor.h"
// #include "tensorflow/core/platform/env.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/thread_annotations.h"
// #include "tensorflow/core/platform/types.h"

@Namespace("Eigen") @Opaque public static class ThreadPoolDevice extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ThreadPoolDevice() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ThreadPoolDevice(Pointer p) { super(p); }
}
@Namespace("Eigen") @Opaque public static class GpuDevice extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public GpuDevice() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GpuDevice(Pointer p) { super(p); }
}
@Namespace("Eigen") @Opaque public static class SyclDevice extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public SyclDevice() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SyclDevice(Pointer p) { super(p); }
}
// end namespace Eigen

@Namespace("tensorflow::checkpoint") @Opaque public static class TensorSliceReaderCacheWrapper extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public TensorSliceReaderCacheWrapper() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorSliceReaderCacheWrapper(Pointer p) { super(p); }
}
// namespace checkpoint

// declared below
// declared below,

@Namespace("tensorflow") @Opaque public static class ResourceMgr extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ResourceMgr() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ResourceMgr(Pointer p) { super(p); }
}
@Namespace("tensorflow") @Opaque public static class ScopedStepContainer extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ScopedStepContainer() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ScopedStepContainer(Pointer p) { super(p); }
}
@Namespace("tensorflow") @Opaque public static class CollectiveExecutor extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public CollectiveExecutor() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CollectiveExecutor(Pointer p) { super(p); }
}
@Namespace("tensorflow") @Opaque public static class StepStatsCollectorInterface extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public StepStatsCollectorInterface() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StepStatsCollectorInterface(Pointer p) { super(p); }
}

@Namespace("tensorflow") @NoOffset public static class OpKernel extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpKernel(Pointer p) { super(p); }

    // OpKernel won't be instantiated by the scheduler, so you may perform
    // expensive initialization in the descendant's constructor.

    // Specialized constructor that enables the descendant to provide a different
    // `NodeDef` value. For example, this constructor can be used to provide a
    // stripped-down `NodeDef` that does not contain the full set of attrs (such
    // as tensor values) if the descendant stores them in a different form.

    // An OpKernel's computation can be either synchronous or
    // asynchronous. All OpKernel Compute() methods must be thread-safe as they
    // may be called concurrently (e.g. by multiple executions of the same graph
    // concurrently).
    //
    // Most OpKernels should compute synchronously. They should
    // subclass OpKernel and override the Compute() method and have it
    // return after completing the supplied work.
    //
    // A few special kernels might need to be asynchronous to bound the
    // number of threads (e.g., network receive operations). These
    // kernels must subclass AsyncOpKernel and override
    // AsyncOpKernel::ComputeAsync().
    //
    // In both cases, implementations of Compute() and ComputeAsync()
    // get inputs and write outputs through the given OpKernelContext
    // and return a status via context->SetStatus(). They must be
    // thread-safe.

    // Synchronous compute.
    //
    // "context" is guaranteed to be alive until Compute() returns.
    public native void Compute(OpKernelContext context);

    // Returns nullptr iff this op kernel is synchronous.
    public native AsyncOpKernel AsAsync();

    // Returns true iff this op kernel is considered "expensive". The
    // runtime may use this flag to optimize graph execution for example
    // to "inline" inexpensive kernels.
    public native @Cast("bool") boolean IsExpensive();

    // Accessors.
    public native @Const @ByRef NodeDef def();
    public native @StdString BytePointer name();              // Same as def().name()
    public native @StdString BytePointer type_string();       // Same as def().op()
    public native @StdString BytePointer requested_device();  // Same as def().device()
    public native @Cast("bool") boolean is_internal();

    public native int num_inputs();
    public native @Cast("tensorflow::DataType") int input_type(int i);
    public native @Const @ByRef DataTypeVector input_types();
    public native @Cast("const tensorflow::MemoryTypeVector*") @ByRef AllocatorAttributesVector input_memory_types();
    public native @StdString BytePointer requested_input(int i);  // Same as def().input(i)

    public native int num_outputs();
    public native @Cast("tensorflow::DataType") int output_type(int o);
    public native @Const @ByRef DataTypeVector output_types();
    public native @Cast("const tensorflow::MemoryTypeVector*") @ByRef AllocatorAttributesVector output_memory_types();

    public native @ByVal Status InputRange(@StringPiece BytePointer input_name, IntPointer start, IntPointer stop);
    public native @ByVal Status InputRange(@StringPiece String input_name, IntBuffer start, IntBuffer stop);
    public native @ByVal Status InputRange(@StringPiece BytePointer input_name, int[] start, int... stop);
    public native @ByVal Status InputRange(@StringPiece String input_name, IntPointer start, IntPointer stop);
    public native @ByVal Status InputRange(@StringPiece BytePointer input_name, IntBuffer start, IntBuffer stop);
    public native @ByVal Status InputRange(@StringPiece String input_name, int[] start, int... stop);
    public native @ByVal Status OutputRange(@StringPiece BytePointer output_name, IntPointer start, IntPointer stop);
    public native @ByVal Status OutputRange(@StringPiece String output_name, IntBuffer start, IntBuffer stop);
    public native @ByVal Status OutputRange(@StringPiece BytePointer output_name, int[] start, int... stop);
    public native @ByVal Status OutputRange(@StringPiece String output_name, IntPointer start, IntPointer stop);
    public native @ByVal Status OutputRange(@StringPiece BytePointer output_name, IntBuffer start, IntBuffer stop);
    public native @ByVal Status OutputRange(@StringPiece String output_name, int[] start, int... stop);

    // We allow legacy scalars within Google up until GraphDef version 6.
    // TODO(irving): Remove when we can drop support for GraphDef version 5.
    public native @Cast("bool") boolean allow_legacy_scalars();

    // Allow either scalars or (if allowing legacy scalars) shape (1,).
    public native @Cast("bool") boolean IsLegacyScalar(@Const @ByRef TensorShape shape);

    // Allow rank 1 or (if allowing legacy scalars) rank 0.
    public native @Cast("bool") boolean IsLegacyVector(@Const @ByRef TensorShape shape);

    // Turn a shape Tensor into a TensorShape
    // TODO(irving): Move to TensorShapeUtils once !allow_legacy_scalars
    public native @ByVal Status MakeShape(@Const @ByRef Tensor shape, TensorShape out);
}
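// Editor's usage sketch (not generated code): OpKernel has no public Java
// constructor here, so assume 'kernel' came from the runtime. The accessors
// above expose its NodeDef-derived signature; DataTypeString comes from the
// types.h section earlier in this class.
public static void describeKernel(OpKernel kernel) {
    System.out.println(kernel.name().getString() + " (" + kernel.type_string().getString() + ")");
    for (int i = 0; i < kernel.num_inputs(); i++) {
        System.out.println("  in " + i + ": " + DataTypeString(kernel.input_type(i)).getString());
    }
    System.out.println("  expensive: " + kernel.IsExpensive());
}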
@Namespace("tensorflow") public static class AsyncOpKernel extends OpKernel {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AsyncOpKernel(Pointer p) { super(p); }

    // Lift OpKernel constructors.

    // Asynchronous compute.
    //
    // Implementations of ComputeAsync() must run "done" to signal the
    // completion of the computation. "context" is guaranteed to be
    // alive until the "done" callback starts.
    public native void ComputeAsync(OpKernelContext context, @ByVal @Cast("tensorflow::AsyncOpKernel::DoneCallback*") Fn done);

    public native AsyncOpKernel AsAsync();
    public native void Compute(OpKernelContext context);
    public native @Cast("bool") boolean IsExpensive();
}

// Wraps a tensor that is held by an Op across calls to Compute(). For
// memory safety when using asynchronous devices like GPUs, the system
// must be notified when a Tensor is used inside an Op execution. The
// wrapper ensures that all uses of the Tensor are tracked, because in
// order to retrieve the Tensor the caller must use AccessTensor which
// notifies the context.
@Namespace("tensorflow") @NoOffset public static class PersistentTensor extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public PersistentTensor(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public PersistentTensor(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public PersistentTensor position(long position) {
        return (PersistentTensor)super.position(position);
    }

    public PersistentTensor() { super((Pointer)null); allocate(); }
    private native void allocate();
    public PersistentTensor(@Const @ByRef Tensor tensor) { super((Pointer)null); allocate(tensor); }
    private native void allocate(@Const @ByRef Tensor tensor);

    // Caller does not own the returned Tensor*.
    public native Tensor AccessTensor(OpKernelConstruction context);

    // Caller does not own the returned Tensor*.
    public native Tensor AccessTensor(OpKernelContext context);

    // The check for initialization does not need to access the
    // underlying tensor buffer.
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("tensorflow::int64") long NumElements();

    public native @Cast("tensorflow::int64") long AllocatedBytes();
}
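// Editor's usage sketch (not generated code): wrapping an existing Tensor 't'
// so it can be held across Compute() calls. Per the comment above, retrieval
// must go through AccessTensor so the context is notified; 'ctx' is assumed
// to be supplied by the runtime.
public static long persistentNumElements(Tensor t, OpKernelContext ctx) {
    PersistentTensor held = new PersistentTensor(t);
    Tensor view = held.AccessTensor(ctx);  // not owned by the caller
    System.out.println(view);
    return held.IsInitialized() ? held.NumElements() : 0;
}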
@Namespace("tensorflow") @NoOffset public static class OpKernelConstruction extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpKernelConstruction(Pointer p) { super(p); }

    public OpKernelConstruction(@ByVal DeviceType device_type, DeviceBase device,
            Allocator allocator, @Const NodeDef node_def, @Const OpDef op_def,
            FunctionLibraryRuntime flib,
            @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector input_types,
            @Cast("const tensorflow::MemoryTypeSlice*") @ByRef StringAttrPairVector input_memory_types,
            @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector output_types,
            @Cast("const tensorflow::MemoryTypeSlice*") @ByRef StringAttrPairVector output_memory_types,
            int graph_def_version, Status status) {
        super((Pointer)null);
        allocate(device_type, device, allocator, node_def, op_def, flib, input_types,
                 input_memory_types, output_types, output_memory_types, graph_def_version, status);
    }
    private native void allocate(@ByVal DeviceType device_type, DeviceBase device,
            Allocator allocator, @Const NodeDef node_def, @Const OpDef op_def,
            FunctionLibraryRuntime flib,
            @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector input_types,
            @Cast("const tensorflow::MemoryTypeSlice*") @ByRef StringAttrPairVector input_memory_types,
            @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector output_types,
            @Cast("const tensorflow::MemoryTypeSlice*") @ByRef StringAttrPairVector output_memory_types,
            int graph_def_version, Status status);

    public native Env env();

    // Allocation of tensors during kernel construction:
    //
    // It is legal to temporarily allocate scratch tensor storage during
    // Op kernel construction. Scratch tensors should be allocated using
    // allocate_temp below. Some kernels need to keep tensors in between
    // invocations. If such a Tensor is allocated during kernel
    // construction this must be done using allocate_persistent, and the
    // Op may only store the returned PersistentTensor object. When the
    // Tensor is needed in a subsequent invocation, it can be retrieved
    // from the PersistentTensor using the AccessTensor method. This
    // ensures that the system is made aware of any use of the tensor's
    // allocated memory, which is needed for correctness on asynchronous
    // devices such as GPUs.

    // Allocates a temporary Tensor of the specified type and shape. The
    // Tensor must not be used after kernel construction is
    // complete. See comment above.
    public native @ByVal Status allocate_temp(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape, Tensor out_temp);

    // Allocates a Tensor of the specified type and shape which the Op
    // plans to maintain as persistent state. out_persistent holds the
    // PersistentTensor which is the object the caller should store. For
    // convenience, if out_tensor is non-null then it will be filled in
    // with a Tensor* pointing to the newly-allocated tensor which the
    // caller can use instead of calling
    // out_persistent->AccessTensor. The caller does not own out_tensor
    // and should not keep a copy of it. See comment above.
    public native @ByVal Status allocate_persistent(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape, PersistentTensor out_persistent, @Cast("tensorflow::Tensor**") PointerPointer out_tensor);
    public native @ByVal Status allocate_persistent(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape, PersistentTensor out_persistent, @ByPtrPtr Tensor out_tensor);

    // User-supplied configuration of this operation.
    public native @Const @ByRef NodeDef def();

    // For inspecting the inputs to this operation.
    public native int num_inputs();
    public native @Cast("tensorflow::DataType") int input_type(int i);
    public native @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector input_types();
    public native @Cast("const tensorflow::MemoryTypeSlice*") @ByRef StringAttrPairVector input_memory_types();

    // For inspecting the outputs expected from this operation.
    public native int num_outputs();
    public native @Cast("tensorflow::DataType") int output_type(int i);
    public native @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector output_types();
    public native @Cast("const tensorflow::MemoryTypeSlice*") @ByRef StringAttrPairVector output_memory_types();

    // If expected_inputs == inputs() and expected_outputs == output_types(),
    // returns OK, else returns INVALID_ARGUMENT with an error message.
    // Recommended for Ops with dynamic signatures.
    public native @ByVal Status MatchSignature(@ByVal @Cast("const tensorflow::DataTypeSlice*") DataTypeVector expected_inputs, @ByVal @Cast("const tensorflow::DataTypeSlice*") DataTypeVector expected_outputs);

    // For recording configuration errors during construction.
    public native void SetStatus(@Const @ByRef Status status);
    public native @Const @ByRef Status status();

    // Look up the attr with name attr_name and set *value to its value. If no
    // attr with attr_name is found in def(), or the attr does not have
    // a matching type, a non-ok status will be returned.

    // Return true if the attr_name is defined in def().
    public native @Cast("bool") boolean HasAttr(@StringPiece BytePointer attr_name);
    public native @Cast("bool") boolean HasAttr(@StringPiece String attr_name);

    // Return the device type.
    public native @Const @ByRef DeviceType device_type();

    // If not nullptr, the kernel can instantiate functions defined in
    // the library. E.g.,
    // CHECK_NOTNULL(function_library())->Instantiate("Foo", ...).
    public native FunctionLibraryRuntime function_library();

    // The GraphDef version whose behavior we should follow.
    public native int graph_def_version();

    // Helper routines for the OP_REQUIRES macros
    public native void CtxFailure(@Const @ByRef Status s);
    public native void CtxFailureWithWarning(@Const @ByRef Status s);
    public native void CtxFailure(@Cast("const char*") BytePointer file, int line, @Const @ByRef Status s);
    public native void CtxFailure(String file, int line, @Const @ByRef Status s);
    public native void CtxFailureWithWarning(@Cast("const char*") BytePointer file, int line, @Const @ByRef Status s);
    public native void CtxFailureWithWarning(String file, int line, @Const @ByRef Status s);

    // Unrecommended functions: these are functions that have some
    // current uses but are not recommended for use, and may go away at
    // some future major version release.

    // May be used, e.g., to get GPU handles, etc.
    //
    // Currently only used to call MakeTensorFromProto() for
    // implementing ConstantOp for every device. See comments
    // on Device::MakeTensorFromProto for longer-term replacement
    // ideas.
    public native DeviceBase device();
}

// TODO(mrry): Consider converting to a random_access_iterator, and upgrading
// tensorflow::gtl::iterator_range to make the below container classes
// unnecessary.
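// Editor's usage sketch (not generated code): the runtime hands an
// OpKernelConstruction to a kernel's constructor; typical use is checking the
// configuration and reserving scratch space. 'ctx' is assumed given, and
// new TensorShape(2, 2) assumes the varargs TensorShape constructor mapped
// elsewhere in this class.
public static void constructionDemo(OpKernelConstruction ctx) {
    System.out.println("has attr T: " + ctx.HasAttr("T"));
    System.out.println("device: " + ctx.device_type().type_string().getString());
    // scratch tensor, valid only until construction completes (see comment above)
    Tensor scratch = new Tensor();
    Status s = ctx.allocate_temp(DT_FLOAT, new TensorShape(2, 2), scratch);
    System.out.println("allocate_temp ok: " + s.ok());
}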
// TODO(mrry): Consider converting to a random_access_iterator, and upgrading
// tensorflow::gtl::iterator_range to make the below container classes
// unnecessary.

// Utility class for representing a list of immutable input tensors
// that are passed to the op as a single named argument.
@Namespace("tensorflow") @NoOffset public static class OpInputList extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpInputList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpInputList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpInputList position(long position) { return (OpInputList)super.position(position); }
    public OpInputList() { super((Pointer)null); allocate(); }
    private native void allocate();
    public OpInputList(OpKernelContext ctx, int start, int stop) { super((Pointer)null); allocate(ctx, start, stop); }
    private native void allocate(OpKernelContext ctx, int start, int stop);
    public native @ByRef @Name("operator =") OpInputList put(@Const @ByRef OpInputList other);
    public native @Const @ByRef @Name("operator []") Tensor get(int i);
    public native int size();
}

// Utility class for representing a list of mutable ("ref") input tensors
// that are passed to the op as a single named argument.
@Namespace("tensorflow") @NoOffset public static class OpMutableInputList extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpMutableInputList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpMutableInputList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpMutableInputList position(long position) { return (OpMutableInputList)super.position(position); }
    public OpMutableInputList(OpKernelContext ctx, int start, int stop) { super((Pointer)null); allocate(ctx, start, stop); }
    private native void allocate(OpKernelContext ctx, int start, int stop);
    public OpMutableInputList() { super((Pointer)null); allocate(); }
    private native void allocate();
    public native @ByRef @Name("operator =") OpMutableInputList put(@Const @ByRef OpMutableInputList other);
    public native @ByVal Tensor at(int i, @Cast("bool") boolean lock_held);
    public native @Cast("tensorflow::mutex*") Pointer ref_mutex(int i);
    public native int size();
}
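
// A usage sketch (illustrative; names are not from the generated API):
// reading a list-valued input through an OpInputList, given an
// OpKernelContext `ctx` obtained from native code (input_list() is declared
// on OpKernelContext below):
//
//   OpInputList values = new OpInputList();
//   Status s = ctx.input_list("values", values);
//   for (int i = 0; s.ok() && i < values.size(); i++) {
//       Tensor t = values.get(i); // immutable view of element i
//   }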
// Utility class for representing a list of output tensors that are
// grouped as a single named output.
@Namespace("tensorflow") @NoOffset public static class OpOutputList extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpOutputList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpOutputList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpOutputList position(long position) { return (OpOutputList)super.position(position); }
    public OpOutputList() { super((Pointer)null); allocate(); }
    private native void allocate();
    public OpOutputList(OpKernelContext ctx, int start, int stop) { super((Pointer)null); allocate(ctx, start, stop); }
    private native void allocate(OpKernelContext ctx, int start, int stop);
    public native @ByRef @Name("operator =") OpOutputList put(@Const @ByRef OpOutputList other);
    public native @Name("operator []") Tensor get(int i);
    public native @Cast("bool") boolean required(int i);
    public native @Cast("tensorflow::DataType") int expected_output_dtype(int i);
    public native @ByVal @Name("allocate") Status _allocate(int i, @Const @ByRef TensorShape shape,
                        @Cast("tensorflow::Tensor**") PointerPointer output);
    public native @ByVal @Name("allocate") Status _allocate(int i, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor output);
    public native void set(int i, @Const @ByRef Tensor tensor);
    public native void set_ref(int i, @Cast("tensorflow::mutex*") Pointer mu, Tensor tensor_for_ref);
    public native int size();
}

// Holds a tensor or tensor reference. For tensor references, we need
// a mutex to prevent concurrent access to the tensor.
@Namespace("tensorflow") @NoOffset public static class TensorValue extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorValue(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TensorValue(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public TensorValue position(long position) { return (TensorValue)super.position(position); }
    public TensorValue() { super((Pointer)null); allocate(); }
    private native void allocate();
    public TensorValue(Tensor t) { super((Pointer)null); allocate(t); }
    private native void allocate(Tensor t);
    public TensorValue(@Cast("tensorflow::mutex*") Pointer mu, Tensor t) { super((Pointer)null); allocate(mu, t); }
    private native void allocate(@Cast("tensorflow::mutex*") Pointer mu, Tensor t);
    public native @Name("operator ->") Tensor access();
    public native @Cast("bool") boolean is_ref();
    public native @Cast("tensorflow::mutex*") Pointer mutex_if_ref();
    public native TensorValue mutex_if_ref(Pointer mutex_if_ref); // nullptr if not a ref, != nullptr if a ref
    public native Tensor tensor();
    public native TensorValue tensor(Tensor tensor);
}
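
// A usage sketch (illustrative): allocating each element of a list-valued
// output named "out" from an OpKernelContext `ctx` (declared below), with
// `shape` built elsewhere. Note that C++ allocate() is exposed as
// _allocate() in Java, per the @Name("allocate") mapping above:
//
//   OpOutputList out = new OpOutputList();
//   Status s = ctx.output_list("out", out);
//   for (int i = 0; s.ok() && i < out.size(); i++) {
//       Tensor slot = new Tensor();
//       s = out._allocate(i, shape, slot);
//   }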
@Namespace("tensorflow") @NoOffset public static class OpKernelContext extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpKernelContext(Pointer p) { super(p); }

    // The first element of a WrappedAllocator is a "base" Allocator and
    // the second element is that Allocator wrapped by a
    // TrackingAllocator

    // TODO(zhifengc): Do some cleanup of Params.
    // The Params struct is passed in to initialize an OpKernelContext,
    // and must outlive the OpKernelContext.
    public static class Params extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Params() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Params(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Params(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Params position(long position) { return (Params)super.position(position); }

        // The step being executed.
        public native @Cast("tensorflow::int64") long step_id(); public native Params step_id(long step_id);

        // The op kernel being computed.
        public native OpKernel op_kernel(); public native Params op_kernel(OpKernel op_kernel);

        // The device on which the kernel is running.
        public native DeviceBase device(); public native Params device(DeviceBase device);

        // The Eigen GPU device wrapper, which may include a per-op
        // wrapped allocator. The concrete type of this object depends on
        // the type of this->device, so eigen_gpu_device can't be an
        // inline member and must be heap allocated. However, we don't
        // want to allocate a new eigen_gpu_device for every Op that is
        // executed. Instead this member is allocated on first use using
        // ensure_eigen_gpu_device, and then if the Params structure is
        // re-used for subsequent Ops, the eigen_gpu_device is
        // ReInitialized in the OpKernelContext constructor. Unlike the
        // other pointers in Params, this one is owned by Params.
        public native PerOpGpuDevice eigen_gpu_device(); public native Params eigen_gpu_device(PerOpGpuDevice eigen_gpu_device);
        public native void ensure_eigen_gpu_device();

        public native @Cast("bool") boolean track_allocations(); public native Params track_allocations(boolean track_allocations);
        public native @Cast("bool") boolean log_memory(); public native Params log_memory(boolean log_memory);
        public native @Cast("bool") boolean record_tensor_accesses(); public native Params record_tensor_accesses(boolean record_tensor_accesses);

        // Array indexed by output number for this node
        @MemberGetter public native @Const AllocatorAttributes output_attr_array();

        // Shared resources accessible by this op kernel invocation.
        public native ResourceMgr resource_manager(); public native Params resource_manager(ResourceMgr resource_manager);

        // Per-step resources accessible by this op kernel invocation should be
        // stored in this container.
        public native ScopedStepContainer step_container(); public native Params step_container(ScopedStepContainer step_container);

        // Mechanism used by this op kernel invocation to communicate with
        // computations running on other devices.
        public native Rendezvous rendezvous(); public native Params rendezvous(Rendezvous rendezvous);

        // Mechanism for executing a collective op that needs to coordinate
        // with parallel instances running on other devices.
        public native CollectiveExecutor collective_executor(); public native Params collective_executor(CollectiveExecutor collective_executor);

        // The session state for this op.
        public native SessionState session_state(); public native Params session_state(SessionState session_state);

        // The tensor store for this op.
        public native TensorStore tensor_store(); public native Params tensor_store(TensorStore tensor_store);

        // Mechanism used by this op kernel invocation to register a callback
        // for its cancellation.
        public native CancellationManager cancellation_manager(); public native Params cancellation_manager(CancellationManager cancellation_manager);
        // Inputs to this op kernel.
        @MemberGetter public native @Const TensorValueVector inputs();
        public native @Cast("bool") boolean is_input_dead(); public native Params is_input_dead(boolean is_input_dead);
        @MemberGetter public native @Const AllocatorAttributesVector input_alloc_attrs();

        // Device contexts.
        @MemberGetter public native @Const DeviceContextInlinedVector input_device_contexts();
        public native DeviceContext op_device_context(); public native Params op_device_context(DeviceContext op_device_context);

        // Control-flow op supports.
        public native @ByRef FrameAndIter frame_iter(); public native Params frame_iter(FrameAndIter frame_iter);

        // Function call supports.
        public native CallFrameInterface call_frame(); public native Params call_frame(CallFrameInterface call_frame);
        public native FunctionLibraryRuntime function_library(); public native Params function_library(FunctionLibraryRuntime function_library);
        public native @Cast("std::function<void(std::function<void()>)>*") Pointer runner(); public native Params runner(Pointer runner);
        public native StepStatsCollectorInterface stats_collector(); public native Params stats_collector(StepStatsCollectorInterface stats_collector);

        // TensorSliceReaderCache support.
        public native TensorSliceReaderCacheWrapper slice_reader_cache(); public native Params slice_reader_cache(TensorSliceReaderCacheWrapper slice_reader_cache);

        // Support for forwarding reservations (used by ScopedAllocator).
        @MemberGetter public static native int kNeverForward();
        public static final int kNeverForward = kNeverForward();
        @MemberGetter public static native int kNoReservation();
        public static final int kNoReservation = kNoReservation();
        // Values in [0,...) represent reservations for the indexed output.
        @MemberGetter public native @Const IntPointer forward_from_array();
    }

    // params must outlive the OpKernelContext.
    public OpKernelContext(Params params) { super((Pointer)null); allocate(params); }
    private native void allocate(Params params);
    public OpKernelContext(Params params, int noutputs) { super((Pointer)null); allocate(params, noutputs); }
    private native void allocate(Params params, int noutputs);

    public native Env env();
    public native @Cast("tensorflow::int64") long step_id();
    public native @Const @ByRef OpKernel op_kernel();

    // Input/output signature.
    public native int num_inputs();
    public native @Cast("tensorflow::DataType") int input_dtype(int index);
    public native @ByVal Status input_dtype(@StringPiece BytePointer name, @Cast("tensorflow::DataType*") IntPointer dtype);
    public native @ByVal Status input_dtype(@StringPiece String name, @Cast("tensorflow::DataType*") IntPointer dtype);
    public native @Cast("tensorflow::MemoryType") int input_memory_type(int index);
    public native int num_outputs();
    public native @Cast("tensorflow::DataType") int expected_output_dtype(int index);
    public native @Cast("tensorflow::MemoryType") int output_memory_type(int index);

    // Input

    // Returns an immutable input tensor. May only be used for non-Ref
    // inputs. For Ref inputs use mutable_input below.
    // REQUIRES: !IsRefType(input_dtype(index))
    // TODO(mrry): Convert this to return Status.
    public native @Const @ByRef Tensor input(int index);

    // Returns the named immutable input tensor in "tensor", as defined
    // in the OpDef. May only be used for non-Ref inputs. For Ref inputs
    // use mutable_input below.
    // REQUIRES: !IsRefType(input_dtype(index))
    // REQUIRES: the named input must not be a list.
    public native @ByVal Status input(@StringPiece BytePointer name, @Cast("const tensorflow::Tensor**") PointerPointer tensor);
    public native @ByVal Status input(@StringPiece BytePointer name, @Const @ByPtrPtr Tensor tensor);
    public native @ByVal Status input(@StringPiece String name, @Const @ByPtrPtr Tensor tensor);

    // Returns the named list-valued immutable input in "list", as
    // defined in the OpDef. If the named input is not list-valued,
    // returns a one-element list. May only be used for non-Ref
    // inputs. For Ref inputs use mutable_input below.
    // REQUIRES: !IsRefType(input_dtype(index))
    public native @ByVal Status input_list(@StringPiece BytePointer name, OpInputList list);
    public native @ByVal Status input_list(@StringPiece String name, OpInputList list);

    // For mutable inputs, use the following together to make sure there
    // is no concurrent access to mutable_input(), e.g.:
    // {
    //   Tensor& t = context->mutable_input(index);
    //   mutex_lock lock(*context->input_ref_mutex(index));
    //   // modify the values in t
    // }
    // REQUIRES: IsRefType(input_dtype(index))
    public native @ByVal Status input_ref_mutex(@StringPiece BytePointer name, @Cast("tensorflow::mutex**") PointerPointer out_mutex);
    public native @ByVal Status input_ref_mutex(@StringPiece BytePointer name, @Cast("tensorflow::mutex**") @ByPtrPtr Pointer out_mutex);
    public native @ByVal Status input_ref_mutex(@StringPiece String name, @Cast("tensorflow::mutex**") @ByPtrPtr Pointer out_mutex);

    // Returns a mutable input tensor. Must be used to access Ref
    // inputs. REQUIRES: IsRefType(input_dtype(index)). The caller may
    // modify the values stored in the Tensor buffer, and modifications
    // will be visible to other Ops reading the same ref tensor. If
    // !lock_held the input mutex will be acquired before returning the
    // Tensor.
    // TODO(mrry): Convert this to return Status.
    public native @ByVal Tensor mutable_input(int index, @Cast("bool") boolean lock_held);

    // Returns the named mutable input tensor in "tensor", as defined in
    // the OpDef. Must be used to access Ref inputs. The values stored
    // in the Tensor buffer may be modified, and modifications will be
    // visible to other Ops reading the same ref tensor. If !lock_held
    // the input mutex will be acquired before returning the Tensor.
    // REQUIRES: the named input must not be a list.
    // REQUIRES: the named input must be a ref tensor.
    public native @ByVal Status mutable_input(@StringPiece BytePointer name, Tensor tensor, @Cast("bool") boolean lock_held);
    public native @ByVal Status mutable_input(@StringPiece String name, Tensor tensor, @Cast("bool") boolean lock_held);

    // Returns the named list-valued mutable input in "list", as defined
    // in the OpDef. If the named input is not list-valued, returns a
    // one-element list. Must be used to access Ref inputs. The values
    // stored in the Tensor buffer may be modified, and modifications
    // will be visible to other Ops reading the same ref tensor.
    // REQUIRES: the named input must be a ref tensor.
    public native @ByVal Status mutable_input_list(@StringPiece BytePointer name, OpMutableInputList list);
    public native @ByVal Status mutable_input_list(@StringPiece String name, OpMutableInputList list);
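
    // A usage sketch from Java (illustrative only; `ctx` is an
    // OpKernelContext obtained from native code). Note that the ref-input
    // locking idiom shown in the C++ comment above has no direct Java
    // equivalent, since tensorflow::mutex is only exposed here as an
    // opaque Pointer:
    //
    //   Tensor x = ctx.input(0);          // non-Ref input, by index
    //   Tensor named = new Tensor();
    //   Status s = ctx.input("x", named); // non-Ref input, by OpDef name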
    // Replace the corresponding Ref Input to use the storage buffer
    // used by tensor. If !lock_held the input mutex will be acquired
    // before returning the Tensor.
    // REQUIRES: IsRefType(input_dtype(index)).
    public native void replace_ref_input(int index, @Const @ByRef Tensor tensor, @Cast("bool") boolean lock_held);

    // Replace the corresponding named Ref Input to use the storage
    // buffer used by tensor. If !lock_held the input mutex will be
    // acquired before returning the Tensor.
    // REQUIRES: IsRefType(input_dtype(index)).
    public native @ByVal Status replace_ref_input(@StringPiece BytePointer name, @Const @ByRef Tensor tensor, @Cast("bool") boolean lock_held);
    public native @ByVal Status replace_ref_input(@StringPiece String name, @Const @ByRef Tensor tensor, @Cast("bool") boolean lock_held);

    // Deletes the Tensor object used as the Ref Input at
    // input_index. This is not usually necessary and should be used
    // with caution. If !lock_held the input mutex will be acquired
    // before returning the Tensor.
    // REQUIRES: IsRefType(input_dtype(input_index)).
    public native void delete_ref_input(int input_index, @Cast("bool") boolean lock_held);

    // Return true if there is input at the given index. An operator has no
    // input at index if its tensor is null. This is primarily used by the
    // merge operator.
    // TODO(mrry): Convert this to return Status.
    public native @Cast("bool") boolean has_input(int index);

    // Returns true if all inputs are the same shape, otherwise sets the
    // status to a non-OK value and returns false.
    // Usage: if (!context->ValidateInputsAreSameShape(this)) return;
    public native @Cast("bool") boolean ValidateInputsAreSameShape(OpKernel op);

    // Input to output forwarding.

    // Set the output Ref Tensor at output_index to be an alias of the
    // input Ref Tensor at input_index.
    // REQUIRES: IsRefType(input_dtype(input_index)).
    // REQUIRES: IsRefType(output_dtype(output_index)).
    public native void forward_ref_input_to_ref_output(int input_index, int output_index);

    // Returns true when an alias to input[input_index], reshaped to output_shape,
    // which is safe to use for in-place computation was written to *output.
    // Returns false if input[input_index] has a refcount greater than one, or if
    // its type does not match the expected output type of output[output_index],
    // or the number of elements in input[input_index] does not equal the number
    // of elements in output_shape.
public native @Cast("bool") boolean forward_input_to_output_with_shape(int input_index, int output_index, @Const @ByRef TensorShape output_shape, @Cast("tensorflow::Tensor**") PointerPointer output); public native @Cast("bool") boolean forward_input_to_output_with_shape(int input_index, int output_index, @Const @ByRef TensorShape output_shape, @ByPtrPtr Tensor output); public native @ByVal Status forward_input_to_output_with_shape(@StringPiece BytePointer input_name, @StringPiece BytePointer output_name, @Const @ByRef TensorShape output_shape, @Cast("tensorflow::Tensor**") PointerPointer output); public native @ByVal Status forward_input_to_output_with_shape(@StringPiece BytePointer input_name, @StringPiece BytePointer output_name, @Const @ByRef TensorShape output_shape, @ByPtrPtr Tensor output); public native @ByVal Status forward_input_to_output_with_shape(@StringPiece String input_name, @StringPiece String output_name, @Const @ByRef TensorShape output_shape, @ByPtrPtr Tensor output); // Returns a pointer to a Tensor aliasing the underlying buffer backing // input[input_index] iff // * input[input_index] is not a ref, // * the data type, shape, memory type, and allocator attributes of // input[input_index] are compatible with those given in dtype, shape, // memory_type, and attr, // * refcount on the underlying buffer is one. // * Either there is no forwarding reservation for either input_index // or output_index or the specified input is reserved for the specified // output. More precisely: // // These cases mean neither input nor output has a reservation: // forward_from_array = nullptr // OR (input_index is not in forward_from_array AND // (output_index == kNoReservation OR // forward_from_array[output_index] == kNoReservation)) // // This case means that input_index is reserved for output_index: // forward_from_array[output_index] == input_index // // This case means the output is reserved to always be allocated, // never assigned a forwarded input: // forward_from_array[output_index] == kNeverForward // // Otherwise returns nullptr. // NOTE: For Cuda kernels that read inputs using the __ldg() intrinsic, // forwarding is only safe if there are no reads via __ldg() after writes // to the same address. public native @UniquePtr Tensor forward_input( int input_index, int output_index, @Cast("tensorflow::DataType") int output_dtype, @Const @ByRef TensorShape output_shape, @Cast("tensorflow::MemoryType") int output_memory_type, @Const @ByRef AllocatorAttributes output_attr); // Tries to forward one of the inputs given in input_indices to // output[output_index]. If none of the given inputs can be forwarded, calls // allocate_output() to allocate a new output buffer. 
    // Tries to forward one of the inputs given in input_indices to
    // output[output_index]. If none of the given inputs can be forwarded, calls
    // allocate_output() to allocate a new output buffer.
    public native @ByVal Status forward_input_or_allocate_output(
        @ArraySlice IntPointer candidate_input_indices, int output_index,
        @Const @ByRef TensorShape output_shape, @Cast("tensorflow::Tensor**") PointerPointer output);
    public native @ByVal Status forward_input_or_allocate_output(
        @ArraySlice IntPointer candidate_input_indices, int output_index,
        @Const @ByRef TensorShape output_shape, @ByPtrPtr Tensor output);
    public native @ByVal Status forward_input_or_allocate_output(
        @ArraySlice IntBuffer candidate_input_indices, int output_index,
        @Const @ByRef TensorShape output_shape, @ByPtrPtr Tensor output);
    public native @ByVal Status forward_input_or_allocate_output(
        @ArraySlice int[] candidate_input_indices, int output_index,
        @Const @ByRef TensorShape output_shape, @ByPtrPtr Tensor output);
    public native @ByVal Status forward_input_or_allocate_output(
        @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::StringPiece>*") StringPieceVector candidate_input_names,
        @StringPiece BytePointer output_name, @Const @ByRef TensorShape output_shape,
        @Cast("tensorflow::Tensor**") PointerPointer output);
    public native @ByVal Status forward_input_or_allocate_output(
        @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::StringPiece>*") StringPieceVector candidate_input_names,
        @StringPiece BytePointer output_name, @Const @ByRef TensorShape output_shape,
        @ByPtrPtr Tensor output);
    public native @ByVal Status forward_input_or_allocate_output(
        @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::StringPiece>*") StringPieceVector candidate_input_names,
        @StringPiece String output_name, @Const @ByRef TensorShape output_shape,
        @ByPtrPtr Tensor output);

    // Tries to reuse one of the inputs given in input_indices as a temporary.
    // If none of the given inputs can be forwarded, calls
    // allocate_temp() to allocate a new temporary buffer.
    public native @ByVal Status forward_input_or_allocate_temp(
        @ArraySlice IntPointer candidate_input_indices, @Cast("tensorflow::DataType") int type,
        @Const @ByRef TensorShape shape, @Const @ByRef AllocatorAttributes allocator_attr,
        Tensor out_temp);
    public native @ByVal Status forward_input_or_allocate_temp(
        @ArraySlice IntBuffer candidate_input_indices, @Cast("tensorflow::DataType") int type,
        @Const @ByRef TensorShape shape, @Const @ByRef AllocatorAttributes allocator_attr,
        Tensor out_temp);
    public native @ByVal Status forward_input_or_allocate_temp(
        @ArraySlice int[] candidate_input_indices, @Cast("tensorflow::DataType") int type,
        @Const @ByRef TensorShape shape, @Const @ByRef AllocatorAttributes allocator_attr,
        Tensor out_temp);
    public native @ByVal Status forward_input_or_allocate_temp(
        @ArraySlice IntPointer candidate_input_indices, @Cast("tensorflow::DataType") int type,
        @Const @ByRef TensorShape shape, Tensor out_temp);
    public native @ByVal Status forward_input_or_allocate_temp(
        @ArraySlice IntBuffer candidate_input_indices, @Cast("tensorflow::DataType") int type,
        @Const @ByRef TensorShape shape, Tensor out_temp);
    public native @ByVal Status forward_input_or_allocate_temp(
        @ArraySlice int[] candidate_input_indices, @Cast("tensorflow::DataType") int type,
        @Const @ByRef TensorShape shape, Tensor out_temp);
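
    // A usage sketch (illustrative): reuse either input 0 or input 1 as
    // scratch space, falling back to a fresh temporary. The int[] overload
    // lets the candidate indices be passed as a plain Java array:
    //
    //   Tensor tmp = new Tensor();
    //   Status s = ctx.forward_input_or_allocate_temp(
    //       new int[] {0, 1}, DT_FLOAT, shape, tmp);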
    // Output

    // Returns the named list-valued output in "list", as defined in the OpDef.
    // If the named output is not list-valued, returns a one-element list.
    public native @ByVal Status output_list(@StringPiece BytePointer name, OpOutputList list);
    public native @ByVal Status output_list(@StringPiece String name, OpOutputList list);

    // If output_required(index) returns true, the OpKernel's Compute() method
    // should call allocate_output(index, ...), set_output(index, ...),
    // set_output_ref(index, ...), or set the status to a non-ok value.
    // If it returns false, it may output, but is not required to do so.
    // TODO(mrry): Convert this to return Status, and implement a string
    // name version.
    public native @Cast("bool") boolean output_required(int index);

    // Allocation of tensors during kernel execution inside the Compute
    // method:
    //
    // There are three methods to allocate Tensors when an Op kernel
    // executes.
    //
    // 1) allocate_persistent. This is only needed for Tensors that will
    // be stored by the Op between invocations, and it *must* be used
    // for those Tensors. The call returns a PersistentTensor, and that
    // is the only object the Op is allowed to hold on to between
    // invocations. When the Tensor is needed in a subsequent
    // invocation, it can be retrieved from the PersistentTensor using
    // the AccessTensor method. This ensures that the system is made
    // aware of any use of the tensor's allocated memory, which is
    // needed for correctness on asynchronous devices such as GPUs.
    //
    // 2) allocate_output. This should be used to allocate any tensor
    // that is going to be used as an output from the Op at the end of
    // the current execution. The caller indicates which output the
    // Tensor will be assigned to, and the call returns the
    // newly-allocated Tensor. The Tensor can subsequently be assigned
    // to during kernel execution, and will be used as the designated
    // output when the kernel execution completes.
    //
    // 3) allocate_temp. This should be used to allocate any scratch
    // storage that is needed while the kernel is executing, and will
    // not be retained by the Op.
    //
    // In some cases a Tensor needs to be used as an output even though
    // it was previously allocated elsewhere. The Tensor may have been
    // passed as an input, or stored in a PersistentTensor during a
    // previous kernel execution, or allocated earlier in the kernel
    // execution at a time when it was not known which output it would
    // be assigned to. In this case the kernel can use set_output or
    // set_output_ref to indicate that the tensor should be used as the
    // designated output. It is legal to use any previously-allocated
    // Tensor as an argument to set_output or set_output_ref, including
    // Tensors allocated via allocate_temp. There may be a performance
    // penalty to using a Tensor that was not allocated using
    // allocate_output. This is because allocate_output uses the
    // AllocatorAttributes stored in output_attr_array for the
    // designated output. In some cases, using the wrong attributes may
    // cause an extra copy of the Tensor's buffer.

    // Allocates output for the specified output index with shape.
    // OpKernelContext retains ownership of the returned pointer. See
    // comment above.
    //
    // If memory allocation fails, returns an error status.
    //
    // REQUIRES: !IsRefType(expected_output_dtype(index))
    public native @ByVal Status allocate_output(int index, @Const @ByRef TensorShape shape,
                        @Cast("tensorflow::Tensor**") PointerPointer tensor);
    public native @ByVal Status allocate_output(int index, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor tensor);
    public native @ByVal Status allocate_output(@StringPiece BytePointer name, @Const @ByRef TensorShape shape,
                        @Cast("tensorflow::Tensor**") PointerPointer tensor);
    public native @ByVal Status allocate_output(@StringPiece BytePointer name, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor tensor);
    public native @ByVal Status allocate_output(@StringPiece String name, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor tensor);

    // The following methods use the supplied attributes instead of
    // those in output_attr_array. The caller is responsible for
    // ensuring that the attributes are "compatible" with the
    // output_attr_array, e.g. the tensor is allocated on the correct
    // device. See comment above.
    public native @ByVal Status allocate_output(int index, @Const @ByRef TensorShape shape,
                        @Cast("tensorflow::Tensor**") PointerPointer tensor, @ByVal AllocatorAttributes attr);
    public native @ByVal Status allocate_output(int index, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor tensor, @ByVal AllocatorAttributes attr);
    public native @ByVal Status allocate_output(@StringPiece BytePointer name, @Const @ByRef TensorShape shape,
                        @Cast("tensorflow::Tensor**") PointerPointer tensor, @ByVal AllocatorAttributes attr);
    public native @ByVal Status allocate_output(@StringPiece BytePointer name, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor tensor, @ByVal AllocatorAttributes attr);
    public native @ByVal Status allocate_output(@StringPiece String name, @Const @ByRef TensorShape shape,
                        @ByPtrPtr Tensor tensor, @ByVal AllocatorAttributes attr);

    // Allocates a temporary Tensor of the specified type and
    // shape. Devices such as GPUs that enqueue Ops for lazy execution
    // may retain references to the temporary tensors after the Op's
    // Compute method has run. See comment above.
    public native @ByVal Status allocate_temp(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        Tensor out_temp, @ByVal AllocatorAttributes allocator_attr,
                        @Const @ByRef AllocationAttributes allocation_attr);
    public native @ByVal Status allocate_temp(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        Tensor out_temp, @ByVal AllocatorAttributes allocator_attr);
    public native @ByVal Status allocate_temp(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        Tensor out_temp);
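
    // A usage sketch (illustrative; `ctx` and `shape` as above): the two
    // most common allocation paths side by side. allocate_output() ties the
    // buffer to output 0, while allocate_temp() returns scratch space the Op
    // must not retain after Compute() finishes:
    //
    //   Tensor out = new Tensor();
    //   Status s1 = ctx.allocate_output(0, shape, out);
    //   Tensor scratch = new Tensor();
    //   Status s2 = ctx.allocate_temp(DT_FLOAT, shape, scratch);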
    // Allocates a Tensor of the specified type and shape which the Op
    // plans to maintain as persistent state. out_persistent holds the
    // PersistentTensor which is the object the caller should store. For
    // convenience, if out_tensor is non-null then it will be filled in
    // with a Tensor* pointing to the newly-allocated tensor which the
    // caller can use instead of calling
    // out_persistent->AccessTensor. The caller does not own out_tensor
    // and should not keep a copy of it. See comment above.
    public native @ByVal Status allocate_persistent(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        PersistentTensor out_persistent, @Cast("tensorflow::Tensor**") PointerPointer out_tensor,
                        @ByVal AllocatorAttributes attr);
    public native @ByVal Status allocate_persistent(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        PersistentTensor out_persistent, @ByPtrPtr Tensor out_tensor,
                        @ByVal AllocatorAttributes attr);
    public native @ByVal Status allocate_persistent(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        PersistentTensor out_persistent, @Cast("tensorflow::Tensor**") PointerPointer out_tensor);
    public native @ByVal Status allocate_persistent(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
                        PersistentTensor out_persistent, @ByPtrPtr Tensor out_tensor);

    // Copies a tensor (allocated by the caller) to the specified output
    // index. REQUIRES: !IsRefType(expected_output_dtype(index))
    // REQUIRES: 'tensor' must have the same MemoryType as
    // output_memory_types[index]. See comment above.
    public native @ByVal Status set_output(@StringPiece BytePointer name, @Const @ByRef Tensor tensor);
    public native @ByVal Status set_output(@StringPiece String name, @Const @ByRef Tensor tensor);

    // To output a reference. Caller retains ownership of mu and tensor_for_ref,
    // and they must outlive all uses within the step. See comment above.
    // REQUIRES: IsRefType(expected_output_dtype(index))
    public native @ByVal Status set_output_ref(@StringPiece BytePointer name, @Cast("tensorflow::mutex*") Pointer mu, Tensor tensor_for_ref);
    public native @ByVal Status set_output_ref(@StringPiece String name, @Cast("tensorflow::mutex*") Pointer mu, Tensor tensor_for_ref);

    // Returns nullptr if allocate_output() or set_output() have not been called.
    public native @ByVal Status mutable_output(@StringPiece BytePointer name, @Cast("tensorflow::Tensor**") PointerPointer tensor);
    public native @ByVal Status mutable_output(@StringPiece BytePointer name, @ByPtrPtr Tensor tensor);
    public native @ByVal Status mutable_output(@StringPiece String name, @ByPtrPtr Tensor tensor);

    // Records device specific state about how the input tensors were
    // computed.
    //
    // If using the templated function, the type must be a subclass
    // of DeviceContext.
    //
    // Get the DeviceContext used for the index input. Returns nullptr
    // if no DeviceContext was provided.
    public native DeviceContext input_device_context(int index);

    // Return the DeviceContext that should be used for this Op.
    //
    // If using the templated function, the type must be a subclass
    // of DeviceContext.
    //
    // Returns nullptr if the device did not provide one.
    public native DeviceContext op_device_context();

    public native @ByVal AllocatorAttributes input_alloc_attr(int index);
    public native @ByVal AllocatorAttributes output_alloc_attr(int index);
    public native @ByVal WrappedAllocatorVector wrapped_allocators();

    // Communication.
    //
    // An op kernel communicates with outside environment through
    // Rendezvous Send() and Recv().
    public native Rendezvous rendezvous();

    public native CollectiveExecutor collective_executor();

    // An op kernel can access the session state it belongs to.
    public native SessionState session_state();

    // An op kernel can access the tensor store of the run it belongs to.
    public native TensorStore tensor_store();

    // Function call support.
    //
    // If this kernel invocation is within a function execution,
    // call_frame() returns the call frame for the function call.
    public native CallFrameInterface call_frame();

    // If not nullptr, the kernel can invoke functions defined in the
    // library. E.g., CHECK_NOTNULL(function_library())->Run("Foo", ...).
    public native FunctionLibraryRuntime function_library();

    public native @Cast("std::function<void(std::function<void()>)>*") Pointer runner();
    public native StepStatsCollectorInterface stats_collector();

    // Shared resources accessible to this kernel.
    public native ResourceMgr resource_manager();

    public native TensorSliceReaderCacheWrapper slice_reader_cache();

    // Execution.
    //
    // OpKernels can use these eigen devices to carry out their
    // numerical computation.
    public native @Const @ByRef ThreadPoolDevice eigen_cpu_device();
    public native @Const @ByRef GpuDevice eigen_gpu_device();
// #ifdef TENSORFLOW_USE_SYCL
// #endif

    // Error handling.

    // If expected_inputs == inputs() and expected_outputs == output_types(),
    // returns OK, else returns INVALID_ARGUMENT with an error message.
    // Recommended for Ops with dynamic signatures, where validation can only
    // be performed at runtime.
    public native @ByVal Status MatchSignature(@ByVal @Cast("const tensorflow::DataTypeSlice*") DataTypeVector expected_inputs,
                        @ByVal @Cast("const tensorflow::DataTypeSlice*") DataTypeVector expected_outputs);

    // An OpKernel should call SetStatus() if Compute() encounters an
    // error.
    public native void SetStatus(@Const @ByRef Status status);
    public native @Const @ByRef Status status();

    // Cancellation.
    //
    // EXPERIMENTAL. See the implementation in tensorflow::TensorQueue for an
    // example of how to use this API.
    public native CancellationManager cancellation_manager();

    // Other accessors.

    // For control flow.
    public native @ByVal FrameAndIter frame_iter();
    public native @Cast("bool") boolean is_input_dead();

    // May be used, e.g., to get GPU handles, etc.
    // TODO(tucker): Add example usage.
    public native DeviceBase device();

    // Retrieve list of referenced tensors in out_vector. Once this is
    // called, it is not legal to reference any more tensors. Should
    // not be called from Op kernels.
    public native void retrieve_accessed_tensors(@Cast("tensorflow::TensorReferenceVector*") AllocatorAttributesVector out_vector);

    // Per-step container for use by white-listed internal ops.
    public native ScopedStepContainer step_container();

    // Helper routines for the OP_REQUIRES macros
    public native void CtxFailure(@Const @ByRef Status s);
    public native void CtxFailureWithWarning(@Const @ByRef Status s);
    public native void CtxFailure(@Cast("const char*") BytePointer file, int line, @Const @ByRef Status s);
    public native void CtxFailure(String file, int line, @Const @ByRef Status s);
    public native void CtxFailureWithWarning(@Cast("const char*") BytePointer file, int line, @Const @ByRef Status s);
    public native void CtxFailureWithWarning(String file, int line, @Const @ByRef Status s);

    // Unrecommended functions: these are functions that have some
    // current uses but are not recommended for use, and may go away at
    // some future major version release.
    //
    // The following functions all have versions that return Status
    // to capture error conditions, and are strongly preferred.
    public native Tensor mutable_output(int index);
    public native void set_output(int index, @Const @ByRef Tensor tensor);
    public native @Cast("tensorflow::mutex*") Pointer input_ref_mutex(int index);
    public native void set_output_ref(int index, @Cast("tensorflow::mutex*") Pointer mu, Tensor tensor_for_ref);
    public native @ByVal TensorValue release_output(int index);

    public native @Cast("bool") boolean track_allocations();

    // Records temp memory allocation. Tensor object is recorded to identify the
    // case where temp memory is used as output memory.
    public native void record_temp_memory_allocation(@Cast("tensorflow::int64") long size, @Const @ByRef Tensor t);

    // Returns recorded size of temporary memory.
    public native @Cast("tensorflow::int64") long temp_memory_allocated();

    // Records persistent memory allocation, size can be negative indicating
    // deallocation.
    public native void record_persistent_memory_allocation(@Cast("tensorflow::int64") long size, @Cast("tensorflow::int64") long alloc_id/*=-1*/);
    public native void record_persistent_memory_allocation(@Cast("tensorflow::int64") long size);

    // Returns recorded size and ids of persistent memory.
    public native @Cast("tensorflow::int64") long persistent_memory_allocated();
    public native @Cast("tensorflow::int64*") @StdVector LongPointer persistent_alloc_ids();

    // Resets counters for temp and persistent memory and recorded ids.
    public native void clear_recorded_memory();

    public native @Cast("bool") boolean input_is_ref(int index);
}

// Register your OpKernel by specifying the Op's name, the device the
// kernel runs on, any type attr constraints for this kernel, any
// host-memory args, and the class to instantiate. Examples:
//
//  // A kernel that supports all types.
//  REGISTER_KERNEL_BUILDER(Name("Save").Device(DEVICE_CPU), SaveOp);
//
//  // The following are equivalent ways of specifying that the kernel only
//  // works if the "T" type attr is set to DT_FLOAT.
//  REGISTER_KERNEL_BUILDER(
//      Name("Sub").Device(DEVICE_CPU).TypeConstraint<float>("T"),
//      SubOp<float>);
//  // (You would then repeat this for every type supported by "Sub".)
//
//  // This form allows you to specify a list of types as the constraint.
//  REGISTER_KERNEL_BUILDER(Name("Sub")
//                              .Device(DEVICE_CPU)
//                              .TypeConstraint("T", {DT_FLOAT}),
//                          SubOp);
//
//  // A kernel that expects one of the input tensors in host memory.
//  REGISTER_KERNEL_BUILDER(
//      Name("Reshape").Device(DEVICE_GPU).HostMemory("shape"), ReshapeOp);
//
// See kernel_def_builder for details.

// Instantiate an OpKernel that has been registered. Returns nullptr
// if no operation for that type of device / input signature combination
// (and a NOT_FOUND *status), or there is an error in construction (and
// an INVALID_ARGUMENT *status). Otherwise, the caller takes ownership
// of the returned pointer.
// EXPECTED USAGE: unique_ptr<OpKernel> op(CreateOpKernel(...));
// REQUIRES: def has all attrs specified (e.g. using AddDefaultsToNodeDef()).
@Namespace("tensorflow") public static native @UniquePtr OpKernel CreateOpKernel(@ByVal DeviceType device_type, DeviceBase device, Allocator allocator, @Const @ByRef NodeDef def, int graph_def_version, Status status); @Namespace("tensorflow") public static native @ByVal Status CreateOpKernel(@ByVal DeviceType device_type, DeviceBase device, Allocator allocator, FunctionLibraryRuntime flib, @Const @ByRef NodeDef def, int graph_def_version, @Cast("tensorflow::OpKernel**") PointerPointer kernel); @Namespace("tensorflow") public static native @ByVal Status CreateOpKernel(@ByVal DeviceType device_type, DeviceBase device, Allocator allocator, FunctionLibraryRuntime flib, @Const @ByRef NodeDef def, int graph_def_version, @ByPtrPtr OpKernel kernel); // Returns into 'device_types' the subset of prioritized_types that this // binary has registered for the given NodeDef. // // REQUIRES: * 'device_types' is not nullptr. // * def has all attrs specified (e.g. using AddDefaultsToNodeDef()). @Namespace("tensorflow") public static native @ByVal Status SupportedDeviceTypesForNode( @StdVector DeviceType prioritized_types, @Const @ByRef NodeDef def, DeviceTypeVector device_types); // Returns a message with a description of the kernels registered for op // `op_name`. @Namespace("tensorflow") public static native @StdString BytePointer KernelsRegisteredForOp(@StringPiece BytePointer op_name); @Namespace("tensorflow") public static native @StdString String KernelsRegisteredForOp(@StringPiece String op_name); // Call once after Op registration has completed. @Namespace("tensorflow") public static native @ByVal Status ValidateKernelRegistrations(@Const @ByRef OpRegistryInterface op_registry); // ----------------------------------------------------------------------------- // OpKernel registration implementation follows, please ignore. // Allow the REGISTER_KERNEL_BUILDER(Name("op_name").Device(...)...) syntax. @Name("tensorflow::register_kernel::Name") public static class RegisterKernelName extends KernelDefBuilder { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RegisterKernelName(Pointer p) { super(p); } // With selective registration, kernels whose implementation class is not used // by any kernel are disabled with the SHOULD_REGISTER_OP_KERNEL call in // REGISTER_KERNEL_BUILDER_UNIQ. However, an unused kernel that shares an // implementation class with a used kernel would get through that mechanism. // // This mechanism stops that registration by changing the name of the kernel // for the unused op to one that is ignored by // OpKernelRegistrar::InitInternal. Note that this method alone is // not sufficient - the compiler can't evaluate the entire KernelDefBuilder at // compilation time, so this method doesn't actually reduce code size. public RegisterKernelName(@Cast("const char*") BytePointer op) { super((Pointer)null); allocate(op); } private native void allocate(@Cast("const char*") BytePointer op); public RegisterKernelName(String op) { super((Pointer)null); allocate(op); } private native void allocate(String op); } @Name("tensorflow::register_kernel::system::Name") public static class RegisterKernelSystemName extends KernelDefBuilder { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RegisterKernelSystemName(Pointer p) { super(p); } // For system kernels, we ignore selective registration and // unconditionally register the kernel. 
@Name("tensorflow::register_kernel::system::Name") public static class RegisterKernelSystemName extends KernelDefBuilder {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RegisterKernelSystemName(Pointer p) { super(p); }

    // For system kernels, we ignore selective registration and
    // unconditionally register the kernel.
    public RegisterKernelSystemName(@Cast("const char*") BytePointer op) { super((Pointer)null); allocate(op); }
    private native void allocate(@Cast("const char*") BytePointer op);
    public RegisterKernelSystemName(String op) { super((Pointer)null); allocate(op); }
    private native void allocate(String op);
}
  // namespace system
  // namespace register_kernel

// #define REGISTER_KERNEL_BUILDER(kernel_builder, ...)
//   REGISTER_KERNEL_BUILDER_UNIQ_HELPER(__COUNTER__, kernel_builder, __VA_ARGS__)

// #define REGISTER_KERNEL_BUILDER_UNIQ_HELPER(ctr, kernel_builder, ...)
//   REGISTER_KERNEL_BUILDER_UNIQ(ctr, kernel_builder, __VA_ARGS__)

// #define REGISTER_KERNEL_BUILDER_UNIQ(ctr, kernel_builder, ...)
//   constexpr bool should_register_##ctr##__flag =
//       SHOULD_REGISTER_OP_KERNEL(#__VA_ARGS__);
//   static ::tensorflow::kernel_factory::OpKernelRegistrar
//       registrar__body__##ctr##__object(
//           should_register_##ctr##__flag
//               ? ::tensorflow::register_kernel::kernel_builder.Build()
//               : nullptr,
//           #__VA_ARGS__,
//           [](::tensorflow::OpKernelConstruction* context)
//               -> ::tensorflow::OpKernel* {
//             return new __VA_ARGS__(context);
//           });

// The `REGISTER_SYSTEM_KERNEL_BUILDER()` macro acts as
// `REGISTER_KERNEL_BUILDER()` except that the kernel is registered
// unconditionally even when selective registration is used.
// #define REGISTER_SYSTEM_KERNEL_BUILDER(kernel_builder, ...)
//   REGISTER_SYSTEM_KERNEL_BUILDER_UNIQ_HELPER(__COUNTER__, kernel_builder,
//                                              __VA_ARGS__)

// #define REGISTER_SYSTEM_KERNEL_BUILDER_UNIQ_HELPER(ctr, kernel_builder, ...)
//   REGISTER_SYSTEM_KERNEL_BUILDER_UNIQ(ctr, kernel_builder, __VA_ARGS__)

// #define REGISTER_SYSTEM_KERNEL_BUILDER_UNIQ(ctr, kernel_builder, ...)
//   static ::tensorflow::kernel_factory::OpKernelRegistrar
//       registrar__body__##ctr##__object(
//           ::tensorflow::register_kernel::system::kernel_builder.Build(),
//           #__VA_ARGS__,
//           [](::tensorflow::OpKernelConstruction* context)
//               -> ::tensorflow::OpKernel* {
//             return new __VA_ARGS__(context);
//           });

@Namespace("tensorflow") public static native Pointer GlobalKernelRegistry();

// If node_def has a corresponding kernel registered on device_type,
// returns OK and fills in the kernel def and kernel_class_name. <def> and
// <kernel_class_name> may be null.
@Namespace("tensorflow") public static native @ByVal Status FindKernelDef(@Const @ByRef DeviceType device_type, @Const @ByRef NodeDef node_def,
                        @Cast("const tensorflow::KernelDef**") PointerPointer def,
                        @StdString @Cast({"char*", "std::string*"}) BytePointer kernel_class_name);
@Namespace("tensorflow") public static native @ByVal Status FindKernelDef(@Const @ByRef DeviceType device_type, @Const @ByRef NodeDef node_def,
                        @Const @ByPtrPtr KernelDef def,
                        @StdString @Cast({"char*", "std::string*"}) BytePointer kernel_class_name);

// Writes a list of all registered kernels to LOG(INFO), to help users debug
// missing kernel errors.
@Namespace("tensorflow") public static native void LogAllRegisteredKernels();
@Namespace("tensorflow") public static native @ByVal KernelList GetAllRegisteredKernels(); // Gets a list of all registered kernels for which predicate returns true @Namespace("tensorflow") public static native @ByVal KernelList GetFilteredRegisteredKernels( @Const @ByRef KernelDefPredicateFn predicate); // Gets a list of all registered kernels for a given op @Namespace("tensorflow") public static native @ByVal KernelList GetRegisteredKernelsForOp(@StringPiece BytePointer op_name); @Namespace("tensorflow") public static native @ByVal KernelList GetRegisteredKernelsForOp(@StringPiece String op_name); @Namespace("tensorflow::kernel_factory") public static class OpKernelRegistrar extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OpKernelRegistrar(Pointer p) { super(p); } public static class Factory extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Factory(Pointer p) { super(p); } protected Factory() { allocate(); } private native void allocate(); public native OpKernel call(OpKernelConstruction arg0); } public OpKernelRegistrar(@Const KernelDef kernel_def, @StringPiece BytePointer kernel_class_name, Factory factory) { super((Pointer)null); allocate(kernel_def, kernel_class_name, factory); } private native void allocate(@Const KernelDef kernel_def, @StringPiece BytePointer kernel_class_name, Factory factory); public OpKernelRegistrar(@Const KernelDef kernel_def, @StringPiece String kernel_class_name, Factory factory) { super((Pointer)null); allocate(kernel_def, kernel_class_name, factory); } private native void allocate(@Const KernelDef kernel_def, @StringPiece String kernel_class_name, Factory factory); } // namespace kernel_factory // ----------------------------------------------------------------------------- // Template and inline method implementations, please ignore // no input if tensor == nullptr. // Convenience macros for asserting and handling exceptional conditions. // Analogous to the CHECK* macros provided by logging.h. // // Example use: // void Compute(OperationContext* context) { // OP_REQUIRES(context, context->num_inputs() == 2, // errors::InvalidArgument("FooOp requires 2 arguments")); // ... // Status status = SomeUncertainMethod(); // OP_REQUIRES_OK(context, status); // ... // } // Generate a fatal error if OP_REQUIRES or OP_REQUIRES_OK are used in // AsyncOpKernel implementations. If these macros are used and the condition // does not hold, the `done` callback will never be called and the system will // deadlock, so a crash failure is preferable. Since the OP_REQUIRES[_OK] macros // are legal to use in AsyncOpKernel constructors, we use overload resolution // to distinguish between OpKernelConstruction* and OpKernelContext* context // types. @Namespace("tensorflow") @Opaque public static class XlaOpKernelContext extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public XlaOpKernelContext() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
// -----------------------------------------------------------------------------
// Template and inline method implementations, please ignore

// no input if tensor == nullptr.

// Convenience macros for asserting and handling exceptional conditions.
// Analogous to the CHECK* macros provided by logging.h.
//
// Example use:
// void Compute(OpKernelContext* context) {
//   OP_REQUIRES(context, context->num_inputs() == 2,
//               errors::InvalidArgument("FooOp requires 2 arguments"));
//   ...
//   Status status = SomeUncertainMethod();
//   OP_REQUIRES_OK(context, status);
//   ...
// }

// Generate a fatal error if OP_REQUIRES or OP_REQUIRES_OK are used in
// AsyncOpKernel implementations. If these macros are used and the condition
// does not hold, the `done` callback will never be called and the system will
// deadlock, so a crash failure is preferable. Since the OP_REQUIRES[_OK] macros
// are legal to use in AsyncOpKernel constructors, we use overload resolution
// to distinguish between OpKernelConstruction* and OpKernelContext* context
// types.
@Namespace("tensorflow") @Opaque public static class XlaOpKernelContext extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public XlaOpKernelContext() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public XlaOpKernelContext(Pointer p) { super(p); }
}
@Namespace("tensorflow") public static native void CheckNotInComputeAsync(XlaOpKernelContext arg0, @Cast("const char*") BytePointer arg1);
@Namespace("tensorflow") public static native void CheckNotInComputeAsync(XlaOpKernelContext arg0, String arg1);
@Namespace("tensorflow") public static native void CheckNotInComputeAsync(OpKernelConstruction arg0, @Cast("const char*") BytePointer arg1);
@Namespace("tensorflow") public static native void CheckNotInComputeAsync(OpKernelConstruction arg0, String arg1);
@Namespace("tensorflow") public static native void CheckNotInComputeAsync(OpKernelContext ctx, @Cast("const char*") BytePointer correct_macro_name);
@Namespace("tensorflow") public static native void CheckNotInComputeAsync(OpKernelContext ctx, String correct_macro_name);

// #define OP_REQUIRES(CTX, EXP, STATUS)
//   do {
//     if (!TF_PREDICT_TRUE(EXP)) {
//       CheckNotInComputeAsync((CTX), "OP_REQUIRES_ASYNC");
//       (CTX)->CtxFailure(__FILE__, __LINE__, (STATUS));
//       return;
//     }
//   } while (0)

// #define OP_REQUIRES_OK(CTX, ...)
//   do {
//     ::tensorflow::Status _s(__VA_ARGS__);
//     if (!TF_PREDICT_TRUE(_s.ok())) {
//       CheckNotInComputeAsync((CTX), "OP_REQUIRES_OK_ASYNC");
//       (CTX)->CtxFailureWithWarning(__FILE__, __LINE__, _s);
//       return;
//     }
//   } while (0)

// #define OP_REQUIRES_ASYNC(CTX, EXP, STATUS, CALLBACK)
//   do {
//     if (!TF_PREDICT_TRUE(EXP)) {
//       (CTX)->CtxFailure(__FILE__, __LINE__, (STATUS));
//       (CALLBACK)();
//       return;
//     }
//   } while (0)

// #define OP_REQUIRES_OK_ASYNC(CTX, STATUS, CALLBACK)
//   do {
//     ::tensorflow::Status _s(STATUS);
//     if (!TF_PREDICT_TRUE(_s.ok())) {
//       (CTX)->CtxFailureWithWarning(__FILE__, __LINE__, _s);
//       (CALLBACK)();
//       return;
//     }
//   } while (0)

  // namespace tensorflow

// #endif  // TENSORFLOW_CORE_FRAMEWORK_OP_KERNEL_H_


// Parsed from tensorflow/core/framework/op_segment.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_FRAMEWORK_OP_SEGMENT_H_
// #define TENSORFLOW_FRAMEWORK_OP_SEGMENT_H_

// #include <string>
// #include <unordered_map>

// #include "tensorflow/core/framework/op_kernel.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/thread_annotations.h"
// #include "tensorflow/core/platform/types.h"

// OpSegment keeps track of OpKernels registered for sessions running
// on a device.
//
// The implementation maintains a two-level map. The 1st level maps
// session handle to the map of registered OpKernels. The 2nd level
// map maps node names to instantiated OpKernel objects.
//
// Each 2-nd level map is reference-counted and the caller can call
// AddHold to obtain a reference on all kernels of a session and
// ensure these kernels are alive until a corresponding RemoveHold is
// called on the same session.
@Namespace("tensorflow") @NoOffset public static class OpSegment extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OpSegment(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public OpSegment(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public OpSegment position(long position) { return (OpSegment)super.position(position); } public OpSegment() { super((Pointer)null); allocate(); } private native void allocate(); // A hold can be placed on a session, preventing all its kernels // from being deleted. public native void AddHold(@StdString BytePointer session_handle); public native void AddHold(@StdString String session_handle); public native void RemoveHold(@StdString BytePointer session_handle); public native void RemoveHold(@StdString String session_handle); // If the kernel for "node_name" has been created in the // "session_handle", returns the existing op kernel in "*kernel". // Otherwise, creates the kernel by calling create_fn(), cache it, // and returns it in "*kernel". If create_fn() fails, returns the // error. // // OpSegment keeps the ownership of the returned "*kernel". public native @ByVal Status FindOrCreate(@StdString BytePointer session_handle, @StdString BytePointer node_name, @Cast("tensorflow::OpKernel**") PointerPointer kernel, @ByVal @Cast("tensorflow::OpSegment::CreateKernelFn*") Pointer create_fn); public native @ByVal Status FindOrCreate(@StdString BytePointer session_handle, @StdString BytePointer node_name, @ByPtrPtr OpKernel kernel, @ByVal @Cast("tensorflow::OpSegment::CreateKernelFn*") Pointer create_fn); public native @ByVal Status FindOrCreate(@StdString String session_handle, @StdString String node_name, @ByPtrPtr OpKernel kernel, @ByVal @Cast("tensorflow::OpSegment::CreateKernelFn*") Pointer create_fn); // Returns true if OpSegment should own the kernel. public static native @Cast("bool") boolean ShouldOwnKernel(FunctionLibraryRuntime lib, @StdString BytePointer node_op); public static native @Cast("bool") boolean ShouldOwnKernel(FunctionLibraryRuntime lib, @StdString String node_op); } // end namespace tensorflow // #endif // TENSORFLOW_FRAMEWORK_OP_SEGMENT_H_ // Parsed from tensorflow/core/framework/shape_inference.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
// Parsed from tensorflow/core/framework/shape_inference.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_SHAPE_INFERENCE_H_
// #define TENSORFLOW_CORE_FRAMEWORK_SHAPE_INFERENCE_H_

// #include <vector>

// #include "tensorflow/core/framework/node_def_util.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/platform/macros.h"

@Namespace("tensorflow") @Opaque public static class ShapeRefinerTest extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public ShapeRefinerTest() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShapeRefinerTest(Pointer p) { super(p); }
}

@Namespace("tensorflow::grappler") @Opaque public static class GraphProperties extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public GraphProperties() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GraphProperties(Pointer p) { super(p); }
}
@Namespace("tensorflow::grappler") @Opaque public static class SymbolicShapeManager extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public SymbolicShapeManager() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SymbolicShapeManager(Pointer p) { super(p); }
}
  // namespace grappler

// Dimension values are accessed through InferenceContext.
@Namespace("tensorflow::shape_inference") @NoOffset public static class Dimension extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Dimension(Pointer p) { super(p); }
}

@Namespace("tensorflow::shape_inference") @NoOffset public static class DimensionHandle extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DimensionHandle(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public DimensionHandle(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public DimensionHandle position(long position) { return (DimensionHandle)super.position(position); }
    public DimensionHandle() { super((Pointer)null); allocate(); }
    private native void allocate();
    public native @Cast("bool") boolean SameHandle(@ByVal DimensionHandle d);
    public native @Cast("std::size_t") long Handle();
}

// Shape rank and dimensions are accessed through InferenceContext.
@Namespace("tensorflow::shape_inference") @NoOffset public static class Shape extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Shape(Pointer p) { super(p); }
}
@Namespace("tensorflow::shape_inference") @NoOffset public static class ShapeHandle extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShapeHandle(Pointer p) { super(p); }
*/ public ShapeHandle(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public ShapeHandle position(long position) { return (ShapeHandle)super.position(position); } public ShapeHandle() { super((Pointer)null); allocate(); } private native void allocate(); public native @Cast("bool") boolean SameHandle(@ByVal ShapeHandle s); public native @Cast("std::size_t") long Handle(); } // Struct used to allow functions to take DimensionHandle or a dimension value. // Not meant to be constructed directly. @Namespace("tensorflow::shape_inference") @NoOffset public static class DimensionOrConstant extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DimensionOrConstant(Pointer p) { super(p); } // Intentionally not explicit. public DimensionOrConstant(@ByVal DimensionHandle dim) { super((Pointer)null); allocate(dim); } private native void allocate(@ByVal DimensionHandle dim); // val must be non-negative or InferenceContext::kUnknownDim. public DimensionOrConstant(@Cast("tensorflow::int64") long val) { super((Pointer)null); allocate(val); } private native void allocate(@Cast("tensorflow::int64") long val); // dim takes precedence. If dim != nullptr, val is ignored. public native @ByRef DimensionHandle dim(); public native DimensionOrConstant dim(DimensionHandle dim); public native @Cast("tensorflow::int64") long val(); public native DimensionOrConstant val(long val); } @Namespace("tensorflow::shape_inference") @NoOffset public static class ShapeAndType extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ShapeAndType(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public ShapeAndType(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public ShapeAndType position(long position) { return (ShapeAndType)super.position(position); } public ShapeAndType() { super((Pointer)null); allocate(); } private native void allocate(); public ShapeAndType(@ByVal ShapeHandle s, @Cast("tensorflow::DataType") int t) { super((Pointer)null); allocate(s, t); } private native void allocate(@ByVal ShapeHandle s, @Cast("tensorflow::DataType") int t); public native @ByRef ShapeHandle shape(); public native ShapeAndType shape(ShapeHandle shape); public native @Cast("tensorflow::DataType") int dtype(); public native ShapeAndType dtype(int dtype); } // Shape inference functions registered on ops in REGISTER_OP implement // their shape functions in terms of this InferenceContext. An InferenceContext // is created by the framework and passed to a shape inference function. The // shape inference function calls functions on the context, and should call // set_output() to set the shape on all outputs. // // To infer shapes for user-defined functions see ShapeRefiner. // // All Shape* and Dimension* returned by functions of InferenceContext are owned // by the InferenceContext. @Namespace("tensorflow::shape_inference") public static class InferenceContext extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/
    public InferenceContext(Pointer p) { super(p); }

    @MemberGetter public static native @Cast("const tensorflow::int64") long kUnknownDim();
    public static final long kUnknownDim = kUnknownDim();
    @MemberGetter public static native int kUnknownRank();
    public static final int kUnknownRank = kUnknownRank();

    // <input_tensors> is NULL-padded to be the same size as <input_shapes>.
    //
    // Elements of <input_tensors_as_shapes> are used for when a shape function
    // makes a call to MakeShapeFromShapeTensor; in particular, when the
    // input_tensors[i] is nullptr but the shape represented by it is partially
    // known from analysis of the graph.
    // <input_tensors_as_shapes> can have fewer elements than <input_shapes>.
    // Values of <input_tensors_as_shapes> do not need to outlive the context.
    //
    // REQUIRES: <node_def> is not NULL, and must outlive the InferenceContext.

    // <input_tensors> is NULL-padded to be the same size as <input_shapes>.
    //
    // Elements of <input_tensors_as_shapes> are used for when a shape
    // function makes a call to MakeShapeFromShapeTensor; in particular, when
    // the input_tensors[i] is nullptr but the shape represented by it is
    // partially known from analysis of the graph.
    // <input_tensors_as_shapes> can have fewer elements than <input_shapes>.
    // Values of <input_tensors_as_shapes> do not need to outlive the context.
    //
    // REQUIRES: <node_def> is not NULL, and must outlive the
    // InferenceContext.

    // <input_tensors> is NULL-padded to be the same size as <input_shapes>.
    //
    // Elements of <input_tensors_as_shapes> are used for when a shape
    // function makes a call to MakeShapeFromShapeTensor; in particular, when
    // the input_tensors[i] is nullptr but the shape represented by it is
    // partially known from analysis of the graph.
    // <input_tensors_as_shapes> can have fewer elements than <input_shapes>.
    // Values of <input_tensors_as_shapes> do not need to outlive the context.
    //
    // REQUIRES: <node_def> is not NULL, and must outlive the
    // InferenceContext.

    // Runs the shape inference function 'fn' with 'this' as the
    // argument, returns the status of the inference.
    //
    // On error, additional context is provided in the error message.
    public native @ByVal Status Run(@ByVal ShapeInferenceFn fn);

    // Merge the stored shape of the input in position idx with <shape> according
    // to the following rules:
    //
    // - If the ShapeHandles are the same or <shape> is unknown, there will be no
    //   change. Otherwise if the stored shape is unknown, the new shape will be
    //   <shape>.
    // - If both shapes are known, then they must have the same rank.
    // - For any one dimension, if the values for that dimension in both shapes
    //   are known, then the values must match.
    // - If one shape has equal or more information than the other shape in every
    //   dimension, the new shape will become the shape with more information.
    // - Example: merging [2,?] and [?,2] results in [2,2]
    // - Example: [2,2] cannot be merged with [1,2]
    //
    // This requires idx to be in the [0, num_inputs) range. If the merge is
    // successful, return true. Return false otherwise.
    public native @Cast("bool") boolean MergeInput(int idx, @ByVal ShapeHandle shape);

    // Relax the stored shape of the input in position idx with <shape> according
    // to the following rules:
    //
    // - If the ShapeHandles are the same then the stored shape will be returned.
    // - If either of the ShapeHandles are unknown, then a new UnknownShape will
    //   be returned. A new shape must be returned because we cannot claim that
    //   the resulting shape is necessarily the same as either of the input
    //   shapes.
    // - If the shapes both have known ranks but their ranks are different, a new
    //   UnknownShape will be returned.
    // - For any one dimension, if the value for that dimension in either of the
    //   shapes is unknown, a new shape will be returned with a new UnknownDim in
    //   that dimension.
    // - For any one dimension, if the values for that dimension in both shapes
    //   are known but do not match, a new shape will be returned with a new
    //   UnknownDim in that dimension.
    // - If both shapes have the same known rank and match in every dimension,
    //   the stored shape will be returned.
    // - Example: relaxing [2,?] and [?,2] results in [?,?]
    // - Example: relaxing [2,2] and [3,2] results in [?,2]
    // - Example: relaxing [2,2] with [1,2,3] results in ?
    //
    // This requires idx to be in the [0, num_inputs) range. If the relax is
    // successful and the new shape differs from the old one, store the new
    // shape and return true. Return false otherwise.
    public native @Cast("bool") boolean RelaxInput(int idx, @ByVal ShapeHandle shape);

    public native void SetInput(int idx, @ByVal ShapeHandle shape);

    public native @ByVal ShapeHandle input(@Cast("tensorflow::int64") long idx);
    public native @ByVal Status input(@StringPiece BytePointer input_name, @StdVector ShapeHandle output);
    public native @ByVal Status input(@StringPiece String input_name, @StdVector ShapeHandle output);
    public native int num_inputs();

    // Returns the input tensor at index <idx>, or nullptr if the input tensor is
    // not available at the time of shape inference.
    public native @Const Tensor input_tensor(int idx);

    // Returns true iff input_tensor(idx) was called by the shape function.
    public native @Cast("bool") boolean requested_input_tensor(int idx);

    // Returns true if MakeShapeFromInputTensor was called but the constant
    // input_tensor was not present.
    public native @Cast("bool") boolean requested_input_tensor_as_partial_shape(int idx);

    public native void set_input_tensors(@Const @ByRef ConstTensorPtrVector input_tensors);

    public native void set_input_tensors_as_shapes(
        @StdVector ShapeHandle input_tensors_as_shapes);
    public native @StdVector ShapeHandle input_tensors_as_shapes();

    public native @ByVal ShapeHandle output(@Cast("tensorflow::int64") long idx);
    public native void set_output(int idx, @ByVal ShapeHandle shape);
    public native @ByVal Status set_output(@StringPiece BytePointer output_name,
                        @StdVector ShapeHandle shapes);
    public native @ByVal Status set_output(@StringPiece String output_name,
                        @StdVector ShapeHandle shapes);

    public native int num_outputs();
    public native @ByVal ShapeHandle output(int idx);
    public native @ByVal Status output(@StringPiece BytePointer output_name,
                  @StdVector ShapeHandle output);
    public native @ByVal Status output(@StringPiece String output_name,
                  @StdVector ShapeHandle output);

    public native @ByVal AttrSlice attrs();

    public native @StdString BytePointer op();

    // idx can be negative for an offset from end of dimensions.
    // idx must be in the range [-1 * s.rank, s.rank).
    public native @ByVal DimensionHandle Dim(@ByVal ShapeHandle s, @Cast("tensorflow::int64") long idx);
    // As above, but asserts that the rank of the shape is known.
    public static native @ByVal DimensionHandle DimKnownRank(@ByVal ShapeHandle s, @Cast("tensorflow::int64") long idx);

    public static native int Rank(@ByVal ShapeHandle s);
    public static native @Cast("bool") boolean RankKnown(@ByVal ShapeHandle s);
    public static native @Cast("tensorflow::int64") long Value(@ByVal DimensionOrConstant d);
    public static native @Cast("bool") boolean ValueKnown(@ByVal DimensionOrConstant d);
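    /* For example, inspecting an input shape through the accessors above
     * (an illustrative sketch, not generated code; {@code c} stands for a
     * framework-provided InferenceContext):
     *
     * <pre>{@code java
     *     ShapeHandle in = c.input(0);
     *     if (InferenceContext.RankKnown(in)) {
     *         int rank = InferenceContext.Rank(in);   // number of dimensions
     *         DimensionHandle d0 = c.Dim(in, 0);      // first dimension
     *         boolean known = InferenceContext.ValueKnown(new DimensionOrConstant(d0));
     *     }
     * }</pre>
     */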
    // Fills the output proto with the shape defined by the handle.
    // "proto" is expected to be empty prior to the call.
    public native void ShapeHandleToProto(@ByVal ShapeHandle handle, TensorShapeProto proto);

    // Returns true if the rank and all dimensions of the Shape are known.
    public native @Cast("bool") boolean FullyDefined(@ByVal ShapeHandle s);

    // Returns the total number of elements, or an unknown dimension for an
    // incomplete shape.
    public native @ByVal DimensionHandle NumElements(@ByVal ShapeHandle s);

    public native @StdString BytePointer DebugString(@ByVal ShapeHandle s);
    public native @StdString BytePointer DebugString(@ByVal DimensionHandle d);
    public native @StdString BytePointer DebugString(@Const @ByRef ShapeAndType shape_and_type);

    // Describes the whole context, for debugging purposes.
    public native @StdString BytePointer DebugString();

    // If <shape> has rank <rank>, or its rank is unknown, return OK and return
    // the shape with asserted rank in <*out>. Otherwise return an error.
    //
    // Note that <*out> may be set to <shape>.
    public native @ByVal Status WithRank(@ByVal ShapeHandle shape, @Cast("tensorflow::int64") long rank,
                     ShapeHandle out);
    public native @ByVal Status WithRankAtLeast(@ByVal ShapeHandle shape, @Cast("tensorflow::int64") long rank,
                     ShapeHandle out);
    public native @ByVal Status WithRankAtMost(@ByVal ShapeHandle shape, @Cast("tensorflow::int64") long rank,
                     ShapeHandle out);

    // If <dim> has value <value>, or its value is unknown, returns OK and returns
    // the dimension with asserted value in <*out>. Otherwise returns an error.
    //
    // Note that <*out> may be set to <dim>.
    public native @ByVal Status WithValue(@ByVal DimensionHandle dim, @Cast("tensorflow::int64") long value,
                      DimensionHandle out);

    // Merges <s0> and <s1> and returns the merged shape in <*out>. See
    // 'MergeInput' function for full details and examples.
    public native @ByVal Status Merge(@ByVal ShapeHandle s0, @ByVal ShapeHandle s1, ShapeHandle out);

    // Asserts that <s>'s rank >= <prefix>'s rank, and the first
    // <prefix.rank> dimensions of <s> are compatible with the dimensions of
    // <prefix>.
    // Returns the merged results in <*s_out> and <*prefix_out>.
    public native @ByVal Status MergePrefix(@ByVal ShapeHandle s, @ByVal ShapeHandle prefix, ShapeHandle s_out,
                        ShapeHandle prefix_out);

    // Merges <d0> and <d1> and returns the merged dimension in <*out>. If <d0>
    // and <d1> have incompatible values, returns an error.
    //
    // Note that <*out> may be set to <d0> or <d1>.
    public native @ByVal Status Merge(@ByVal DimensionHandle d0, @ByVal DimensionHandle d1,
                  DimensionHandle out);

    // Returns in <*out> a sub-shape of <s> with dimensions [start:].
    // <start> can be negative to index from the end of the shape. If <start> >
    // rank of <s>, then an empty subshape is returned.
    public native @ByVal Status Subshape(@ByVal ShapeHandle s, @Cast("tensorflow::int64") long start, ShapeHandle out);

    // Returns in <*out> a sub-shape of <s>, with dimensions [start:end].
    // <start> and <end> can be negative, to index from the end of the shape.
    // <start> and <end> are set to the rank of <s> if > rank of <s>.
    public native @ByVal Status Subshape(@ByVal ShapeHandle s, @Cast("tensorflow::int64") long start, @Cast("tensorflow::int64") long end,
                     ShapeHandle out);

    // Returns in <*out> a sub-shape of <s>, with dimensions [start:end:stride].
    // <start> and <end> can be negative, to index from the end of the shape.
    // <start> and <end> are set to the rank of <s> if > rank of <s>.
    // <stride> can be negative, to reverse the <s>.
    public native @ByVal Status Subshape(@ByVal ShapeHandle s, @Cast("tensorflow::int64") long start, @Cast("tensorflow::int64") long end, @Cast("tensorflow::int64") long stride,
                     ShapeHandle out);
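    /* For example, asserting a rank and slicing off the batch dimension (an
     * illustrative sketch, not generated code; {@code c} is a
     * framework-provided InferenceContext):
     *
     * <pre>{@code java
     *     ShapeHandle matrix = new ShapeHandle();
     *     Status s = c.WithRank(c.input(0), 2, matrix);  // error unless rank is 2 or unknown
     *     ShapeHandle tail = new ShapeHandle();
     *     if (s.ok()) s = c.Subshape(c.input(0), 1, tail);  // input[1:], Python-style slicing
     * }</pre>
     */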
    // Returns in <*out> the result of appending the dimensions of <s2> to those
    // of <s1>.
    public native @ByVal Status Concatenate(@ByVal ShapeHandle s1, @ByVal ShapeHandle s2,
                        ShapeHandle out);

    // Returns in <out> the shape from replacing <s.dim[dim_index]> with
    // <new_dim>.
    public native @ByVal Status ReplaceDim(@ByVal ShapeHandle s, @Cast("tensorflow::int64") long dim_index,
                       @ByVal DimensionHandle new_dim, ShapeHandle out);

    // Returns a new shape with the given dims. The returned value is owned by
    // this context.
    public native @ByVal ShapeHandle MakeShape(@StdVector DimensionHandle dims);

    // Returns a new unknown shape.
    public native @ByVal ShapeHandle UnknownShape();

    // Returns a shape with specified rank but unknown dims.
    public native @ByVal ShapeHandle UnknownShapeOfRank(@Cast("tensorflow::int64") long rank);

    // Returns a new shape of zero dimensions.
    public native @ByVal ShapeHandle Scalar();

    // Returns a new shape of one dimension.
    public native @ByVal ShapeHandle Vector(@ByVal DimensionOrConstant dim);

    // Returns a new shape of two dimensions.
    public native @ByVal ShapeHandle Matrix(@ByVal DimensionOrConstant dim1, @ByVal DimensionOrConstant dim2);

    // Returns in <out> a new shape whose dimension sizes come from input tensor
    // <input_idx>. The tensor must be a 1-dimensional int32 or int64 tensor. If
    // the input tensor is NULL, then an unknown shape is returned.
    public native @ByVal Status MakeShapeFromShapeTensor(int input_idx, ShapeHandle out);

    // Like the function above, but treats scalar values as unknown
    // shapes. **NOTE** If the scalar is statically known, its value
    // must be -1 or an error is returned.
    public native @ByVal Status MakeShapeFromShapeTensorTreatScalarAsUnknownShape(int input_idx,
                        ShapeHandle out);

    // Returns in <out> a new shape corresponding to <proto>.
    public native @ByVal Status MakeShapeFromShapeProto(@Const @ByRef TensorShapeProto proto,
                        ShapeHandle out);

    // Returns in <out> a new shape corresponding to <partial_shape>.
    public native @ByVal Status MakeShapeFromPartialTensorShape(
        @Const @ByRef PartialTensorShape partial_shape, ShapeHandle out);

    // Returns in <out> a new shape corresponding to <shape>.
    public native @ByVal Status MakeShapeFromTensorShape(@Const @ByRef TensorShape shape, ShapeHandle out);

    // Returns a new dimension of the given size. The returned value is owned by
    // this context.
    public native @ByVal DimensionHandle MakeDim(@ByVal DimensionOrConstant d);
    public native @ByVal DimensionHandle UnknownDim();

    // Returns in <val> a scalar value from an input tensor <t>. The input tensor
    // must be a 1-dimensional int32 or int64 tensor. Caller must ensure that the
    // input tensor is not NULL.
    public native @ByVal Status GetScalarFromTensor(@Const Tensor t, @Cast("tensorflow::int64*") LongPointer val);
    public native @ByVal Status GetScalarFromTensor(@Const Tensor t, @Cast("tensorflow::int64*") LongBuffer val);
    public native @ByVal Status GetScalarFromTensor(@Const Tensor t, @Cast("tensorflow::int64*") long... val);

    // Returns a new dimension whose value is given by a scalar input tensor.
    // The input tensor must be in host memory, since it is dereferenced to get
    // the value.
    public native @ByVal Status MakeDimForScalarInput(int idx, DimensionHandle out);
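    /* Putting the factory helpers together, a matmul-style output shape could
     * be produced as below (an illustrative sketch, not generated code;
     * {@code c} is a framework-provided InferenceContext and {@code a},
     * {@code b} are rank-2 ShapeHandles validated earlier, e.g. via
     * WithRank):
     *
     * <pre>{@code java
     *     DimensionHandle inner = new DimensionHandle();
     *     Status s = c.Merge(c.Dim(a, 1), c.Dim(b, 0), inner);  // inner dimensions must agree
     *     if (s.ok()) c.set_output(0, c.Matrix(new DimensionOrConstant(c.Dim(a, 0)),
     *                                          new DimensionOrConstant(c.Dim(b, 1))));
     * }</pre>
     */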
    // Returns a new dimension whose value is given by a scalar input tensor.
    // This allows for a negative input dimension given the rank of a separate
    // tensor. This rank can be negative if unknown.
    // The input tensor must be in host memory, since it is dereferenced to get
    // the value.
    public native @ByVal Status MakeDimForScalarInputWithNegativeIndexing(int idx, int input_rank,
                        DimensionHandle out);

    // Look up the attr for the NodeDef being evaluated with name attr_name and
    // set *value to its value. If no attr with attr_name is found in def(), or
    // the attr does not have a matching type, a non-ok status will be returned.

    // Returns in <out> the result of dividing <dividend> by <divisor>.
    // Returns an error if <divisor> is not positive or if <evenly_divisible>
    // and <divisor> does not evenly divide <dividend>.
    public native @ByVal Status Divide(@ByVal DimensionHandle dividend, @ByVal DimensionOrConstant divisor,
                   @Cast("bool") boolean evenly_divisible, DimensionHandle out);

    // Returns in <out> the sum of <first> and <second>.
    public native @ByVal Status Add(@ByVal DimensionHandle first, @ByVal DimensionOrConstant second,
                   DimensionHandle out);

    // Returns in <out> the dimension that is <first> minus <second>.
    public native @ByVal Status Subtract(@ByVal DimensionHandle first, @ByVal DimensionOrConstant second,
                   DimensionHandle out);

    // Returns in <out> the product of <first> and <second>.
    public native @ByVal Status Multiply(@ByVal DimensionHandle first, @ByVal DimensionOrConstant second,
                   DimensionHandle out);

    // Returns in <out> the minimum of <first> and <second>. If either <first>
    // or <second> is zero the result is zero. Otherwise, if either <first>
    // or <second> is unknown the result is unknown.
    public native @ByVal Status Min(@ByVal DimensionHandle first, @ByVal DimensionOrConstant second,
                   DimensionHandle out);

    // Returns in <out> the maximum of <first> and <second>. If either <first>
    // or <second> is unknown the result is unknown.
    public native @ByVal Status Max(@ByVal DimensionHandle first, @ByVal DimensionOrConstant second,
                   DimensionHandle out);

    public native @ByVal Status construction_status();

    // Methods to propagate shape and dtype on edges of handles. Handles are the
    // dtype DT_RESOURCE which can be used to access state stored in a
    // ResourceManager. When ops (such as variables) consume these handles to
    // produce tensors they might need to know side-information about the shapes
    // and dtypes of tensors which can be accessed via the handle. These methods
    // propagate that information. Output handle dtypes and shapes are ignored if
    // the output tensor is not of type DT_RESOURCE.

    // Merge the stored shapes and types corresponding to the input handle in
    // position idx with the specified shapes and types. This requires idx to be
    // in the [0, num_inputs) range.
    //
    // If the merge is successful and any of the new shapes differs from the old
    // one, or any of the old dtypes was DT_INVALID, store the new shapes and
    // return true. Return false otherwise.
    //
    // See 'MergeInput' function for full details and examples.
    public native @Cast("bool") boolean MergeInputHandleShapesAndTypes(
        int idx, @StdVector ShapeAndType shapes_and_types);

    // As MergeInputHandleShapesAndTypes, but for an output.
    public native @Cast("bool") boolean MergeOutputHandleShapesAndTypes(
        int idx, @StdVector ShapeAndType shapes_and_types);

    // Relaxes the stored shapes and types corresponding to the input handle in
    // position idx with the specified shapes and types. This requires idx to be
    // in the [0, num_inputs) range.
    //
    // If the relax is successful and any of the new shapes differs from the old
    // one, or any of the old dtypes was DT_INVALID, store the new shapes and
    // return true. Return false otherwise.
    //
    // See 'RelaxInput' function for full details and examples.
    public native @Cast("bool") boolean RelaxInputHandleShapesAndMergeTypes(
        int idx, @StdVector ShapeAndType shapes_and_types);
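    /* The dimension arithmetic above composes the same way; e.g. halving a
     * dimension that must be even (an illustrative sketch, not generated
     * code; {@code c} is an InferenceContext and {@code d} some
     * DimensionHandle):
     *
     * <pre>{@code java
     *     DimensionHandle half = new DimensionHandle();
     *     Status s = c.Divide(d, new DimensionOrConstant(2), true, half);  // error unless d % 2 == 0
     * }</pre>
     */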
public native @Cast("bool") boolean RelaxOutputHandleShapesAndMergeTypes( int idx, @StdVector ShapeAndType shapes_and_types); public native void set_input_handle_shapes_and_types( int idx, @StdVector ShapeAndType shapes_and_types); // Returns the output handle shapes and types, for the resource tensor output // at index . Returns NULL if the shape and types were never set. public native @StdVector ShapeAndType output_handle_shapes_and_types(int idx); // Returns the inputs handle shapes and types, for the resource tensor output // at index . Returns NULL if the shape and types were not available. public native @StdVector ShapeAndType input_handle_shapes_and_types(int idx); public native void set_output_handle_shapes_and_types( int idx, @StdVector ShapeAndType shapes_and_types); // Note that shape functions should usually call MakeShapeFromShapeTensor, // as it does more analysis to provide partial shapes. // // Returns in a new shape whose dimension sizes come from tensor . // The tensor must be a 1-dimensional int32 or int64 tensor. If is NULL, // then an unknown shape is returned. public native @ByVal Status MakeShapeFromTensor(@Const Tensor t, @ByVal ShapeHandle tensor_shape, ShapeHandle out); public native int graph_def_version(); public native @StdVector ShapeHandlePair MergedShapes(); public native @StdVector DimensionHandlePair MergedDims(); } // ----------------------------------------------------------------------------- // Template and inline method implementations, please ignore // namespace shape_inference // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_SHAPE_INFERENCE_H_ // Parsed from tensorflow/core/framework/partial_tensor_shape.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_ // #define TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_ // TODO(irving): Remove this forwarding header // #include "tensorflow/core/framework/tensor_shape.h" // #endif // TENSORFLOW_CORE_FRAMEWORK_PARTIAL_TENSOR_SHAPE_H_ // Parsed from tensorflow/core/framework/device_attributes.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/device_attributes.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fdevice_5fattributes_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fdevice_5fattributes_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. 
// #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2fdevice_5fattributes_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2fdevice_5fattributes_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class InterconnectLink extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InterconnectLink(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public InterconnectLink(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public InterconnectLink position(long position) { return (InterconnectLink)super.position(position); } public InterconnectLink() { super((Pointer)null); allocate(); } private native void allocate(); public InterconnectLink(@Const @ByRef InterconnectLink from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef InterconnectLink from); public native @ByRef @Name("operator =") InterconnectLink put(@Const @ByRef InterconnectLink from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef InterconnectLink default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const InterconnectLink internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(InterconnectLink other); public native void Swap(InterconnectLink other); // implements Message ---------------------------------------------- public native InterconnectLink New(); public native InterconnectLink New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef InterconnectLink from); public native void MergeFrom(@Const @ByRef InterconnectLink from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native 
int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string type = 2; public native void clear_type(); @MemberGetter public static native int kTypeFieldNumber(); public static final int kTypeFieldNumber = kTypeFieldNumber(); public native @StdString BytePointer type(); public native void set_type(@StdString BytePointer value); public native void set_type(@StdString String value); // #if LANG_CXX11 // #endif public native void set_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_type(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type(); public native void set_allocated_type(@StdString @Cast({"char*", "std::string*"}) BytePointer type); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_type(); public native @Deprecated void unsafe_arena_set_allocated_type( @StdString @Cast({"char*", "std::string*"}) BytePointer type); // int32 device_id = 1; public native void clear_device_id(); @MemberGetter public static native int kDeviceIdFieldNumber(); public static final int kDeviceIdFieldNumber = kDeviceIdFieldNumber(); public native @Cast("google::protobuf::int32") int device_id(); public native void set_device_id(@Cast("google::protobuf::int32") int value); // int32 strength = 3; public native void clear_strength(); @MemberGetter public static native int kStrengthFieldNumber(); public static final int kStrengthFieldNumber = kStrengthFieldNumber(); public native @Cast("google::protobuf::int32") int strength(); public native void set_strength(@Cast("google::protobuf::int32") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class LocalLinks extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LocalLinks(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public LocalLinks(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public LocalLinks position(long position) { return (LocalLinks)super.position(position); } public LocalLinks() { super((Pointer)null); allocate(); } private native void allocate(); public LocalLinks(@Const @ByRef LocalLinks from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef LocalLinks from); public native @ByRef @Name("operator =") LocalLinks put(@Const @ByRef LocalLinks from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef LocalLinks default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const LocalLinks internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(LocalLinks other); public native void Swap(LocalLinks other); // implements Message ---------------------------------------------- public native LocalLinks New(); public native LocalLinks New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef LocalLinks from); public native void MergeFrom(@Const @ByRef LocalLinks from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.InterconnectLink link = 1; public native int link_size(); public native void clear_link(); @MemberGetter public static native int kLinkFieldNumber(); public static final int kLinkFieldNumber = kLinkFieldNumber(); public native InterconnectLink mutable_link(int index); public native @Const @ByRef InterconnectLink link(int index); public native InterconnectLink add_link(); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class DeviceLocality extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeviceLocality(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public DeviceLocality(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public DeviceLocality position(long position) { return (DeviceLocality)super.position(position); } public DeviceLocality() { super((Pointer)null); allocate(); } private native void allocate(); public DeviceLocality(@Const @ByRef DeviceLocality from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef DeviceLocality from); public native @ByRef @Name("operator =") DeviceLocality put(@Const @ByRef DeviceLocality from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef DeviceLocality default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const DeviceLocality internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(DeviceLocality other); public native void Swap(DeviceLocality other); // implements Message ---------------------------------------------- public native DeviceLocality New(); public native DeviceLocality New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef DeviceLocality from); public native void MergeFrom(@Const @ByRef DeviceLocality from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.LocalLinks links = 3; public native @Cast("bool") boolean has_links(); public native void clear_links(); @MemberGetter public static native int kLinksFieldNumber(); public static final int kLinksFieldNumber = kLinksFieldNumber(); public native @Const @ByRef LocalLinks links(); public native LocalLinks release_links(); public native LocalLinks mutable_links(); public native void set_allocated_links(LocalLinks links); public native void unsafe_arena_set_allocated_links( LocalLinks links); public native LocalLinks unsafe_arena_release_links(); // int32 bus_id = 1; public native void clear_bus_id(); @MemberGetter public static native int kBusIdFieldNumber(); public static final int kBusIdFieldNumber = 
kBusIdFieldNumber(); public native @Cast("google::protobuf::int32") int bus_id(); public native void set_bus_id(@Cast("google::protobuf::int32") int value); // int32 numa_node = 2; public native void clear_numa_node(); @MemberGetter public static native int kNumaNodeFieldNumber(); public static final int kNumaNodeFieldNumber = kNumaNodeFieldNumber(); public native @Cast("google::protobuf::int32") int numa_node(); public native void set_numa_node(@Cast("google::protobuf::int32") int value); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class DeviceAttributes extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeviceAttributes(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public DeviceAttributes(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public DeviceAttributes position(long position) { return (DeviceAttributes)super.position(position); } public DeviceAttributes() { super((Pointer)null); allocate(); } private native void allocate(); public DeviceAttributes(@Const @ByRef DeviceAttributes from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef DeviceAttributes from); public native @ByRef @Name("operator =") DeviceAttributes put(@Const @ByRef DeviceAttributes from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef DeviceAttributes default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const DeviceAttributes internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(DeviceAttributes other); public native void Swap(DeviceAttributes other); // implements Message ---------------------------------------------- public native DeviceAttributes New(); public native DeviceAttributes New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef DeviceAttributes from); public native void MergeFrom(@Const @ByRef DeviceAttributes from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal 
@Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string name = 1; public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( @StdString @Cast({"char*", "std::string*"}) BytePointer name); // string device_type = 2; public native void clear_device_type(); @MemberGetter public static native int kDeviceTypeFieldNumber(); public static final int kDeviceTypeFieldNumber = kDeviceTypeFieldNumber(); public native @StdString BytePointer device_type(); public native void set_device_type(@StdString BytePointer value); public native void set_device_type(@StdString String value); // #if LANG_CXX11 // #endif public native void set_device_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_device_type(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device_type(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device_type(); public native void set_allocated_device_type(@StdString @Cast({"char*", "std::string*"}) BytePointer device_type); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_device_type(); public native @Deprecated void unsafe_arena_set_allocated_device_type( @StdString @Cast({"char*", "std::string*"}) BytePointer device_type); // string physical_device_desc = 7; public native void clear_physical_device_desc(); @MemberGetter public static native int kPhysicalDeviceDescFieldNumber(); public static final int kPhysicalDeviceDescFieldNumber = kPhysicalDeviceDescFieldNumber(); public native @StdString BytePointer physical_device_desc(); public native void set_physical_device_desc(@StdString BytePointer value); public native void set_physical_device_desc(@StdString String value); // #if LANG_CXX11 // #endif public native void set_physical_device_desc(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_physical_device_desc(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_physical_device_desc(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_physical_device_desc(); public native void set_allocated_physical_device_desc(@StdString @Cast({"char*", "std::string*"}) BytePointer physical_device_desc); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_physical_device_desc(); public native 
@Deprecated void unsafe_arena_set_allocated_physical_device_desc( @StdString @Cast({"char*", "std::string*"}) BytePointer physical_device_desc); // .tensorflow.DeviceLocality locality = 5; public native @Cast("bool") boolean has_locality(); public native void clear_locality(); @MemberGetter public static native int kLocalityFieldNumber(); public static final int kLocalityFieldNumber = kLocalityFieldNumber(); public native @Const @ByRef DeviceLocality locality(); public native DeviceLocality release_locality(); public native DeviceLocality mutable_locality(); public native void set_allocated_locality(DeviceLocality locality); public native void unsafe_arena_set_allocated_locality( DeviceLocality locality); public native DeviceLocality unsafe_arena_release_locality(); // int64 memory_limit = 4; public native void clear_memory_limit(); @MemberGetter public static native int kMemoryLimitFieldNumber(); public static final int kMemoryLimitFieldNumber = kMemoryLimitFieldNumber(); public native @Cast("google::protobuf::int64") long memory_limit(); public native void set_memory_limit(@Cast("google::protobuf::int64") long value); // fixed64 incarnation = 6; public native void clear_incarnation(); @MemberGetter public static native int kIncarnationFieldNumber(); public static final int kIncarnationFieldNumber = kIncarnationFieldNumber(); public native @Cast("google::protobuf::uint64") long incarnation(); public native void set_incarnation(@Cast("google::protobuf::uint64") long value); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // InterconnectLink // int32 device_id = 1; // string type = 2; // #if LANG_CXX11 // #endif // int32 strength = 3; // ------------------------------------------------------------------- // LocalLinks // repeated .tensorflow.InterconnectLink link = 1; // ------------------------------------------------------------------- // DeviceLocality // int32 bus_id = 1; // int32 numa_node = 2; // .tensorflow.LocalLinks links = 3; // ------------------------------------------------------------------- // DeviceAttributes // string name = 1; // #if LANG_CXX11 // #endif // string device_type = 2; // #if LANG_CXX11 // #endif // int64 memory_limit = 4; // .tensorflow.DeviceLocality locality = 5; // fixed64 incarnation = 6; // string physical_device_desc = 7; // #if LANG_CXX11 // #endif // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2fdevice_5fattributes_2eproto // Parsed from tensorflow/core/public/session.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_PUBLIC_SESSION_H_ // #define TENSORFLOW_CORE_PUBLIC_SESSION_H_ // #include // #include // #include "tensorflow/core/framework/device_attributes.pb.h" // #include "tensorflow/core/framework/graph.pb.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/platform/env.h" // #include "tensorflow/core/protobuf/config.pb.h" // #include "tensorflow/core/public/session_options.h" /** \brief A Session instance lets a caller drive a TensorFlow graph * computation. * * When a Session is created with a given target, a new Session object * is bound to the universe of resources specified by that target. * Those resources are available to this session to perform * computation described in the GraphDef. After extending the session * with a graph, the caller uses the Run() API to perform the * computation and potentially fetch outputs as Tensors. * * Example: * *

 * <pre>{@code c++
 * 
 *      tensorflow::GraphDef graph;
 *      // ... Create or load graph into "graph".
 * 
 *      // This example uses the default options which connects
 *      // to a local runtime.
 *      tensorflow::SessionOptions options;
 *      std::unique_ptr<tensorflow::Session>
 *      session(tensorflow::NewSession(options));
 * 
 *      // Create the session with this graph.
 *      tensorflow::Status s = session->Create(graph);
 *      if (!s.ok()) { ... }
 * 
 *      // Run the graph and fetch the first output of the "output"
 *      // operation, and also run to but do not return anything
 *      // for the "update_state" operation.
 *      std::vector<tensorflow::Tensor> outputs;
 *      s = session->Run({}, {"output:0"}, {"update_state"}, &outputs);
 *      if (!s.ok()) { ... }
 * 
 *      // Map the output as a flattened float tensor, and do something
 *      // with it.
 *      auto output_tensor = outputs[0].flat<float>();
 *      if (output_tensor(0) > 0.5) { ... }
 * 
 *      // Close the session to release the resources associated with
 *      // this session.
 *      session->Close();
 * 
 *  }</pre>
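 *
 *  A rough Java equivalent of the same flow using these JavaCPP bindings (an
 *  illustrative sketch, not generated documentation; it assumes a serialized
 *  GraphDef at a hypothetical path "graph.pb" and elides error handling
 *  beyond ok() checks):
 *
 *  <pre>{@code java
 *      GraphDef graph = new GraphDef();
 *      Status s = ReadBinaryProto(Env.Default(), "graph.pb", graph);
 *      if (!s.ok()) { ... }
 *
 *      SessionOptions options = new SessionOptions();
 *      Session session = new Session(options);
 *      s = session.Create(graph);
 *      if (!s.ok()) { ... }
 *
 *      TensorVector outputs = new TensorVector();
 *      s = session.Run(new StringTensorPairVector(),
 *                      new StringVector("output:0"),
 *                      new StringVector("update_state"), outputs);
 *      if (!s.ok()) { ... }
 *
 *      session.Close();
 *  }</pre>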
* * A Session allows concurrent calls to Run(), though a Session must * be created / extended by a single thread. * * Only one thread must call Close(), and Close() must only be called * after all other calls to Run() have returned. */ @Namespace("tensorflow") public static class Session extends AbstractSession { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Session(Pointer p) { super(p); } /// /** Calls {@link tensorflow#NewSession(SessionOptions)} and registers a deallocator. */ public Session(SessionOptions options) { super(options); } /** \brief Create the graph to be used for the session. * * Returns an error if this session has already been created with a * graph. To re-use the session with a different graph, the caller * must Close() the session first. */ /// public native @ByVal Status Create(@Const @ByRef GraphDef graph); /** \brief Adds operations to the graph that is already registered with the * Session. * * The names of new operations in "graph" must not exist in the * graph that is already registered. */ /// /// /// /// /// public native @ByVal Status Extend(@Const @ByRef GraphDef graph); /** \brief Runs the graph with the provided input tensors and fills * {@code outputs} for the endpoints specified in {@code output_tensor_names}. * Runs to but does not return Tensors for the nodes in * {@code target_node_names}. * * The order of tensors in {@code outputs} will match the order provided * by {@code output_tensor_names}. * * If {@code Run} returns {@code OK()}, then {@code outputs->size()} will be equal to * {@code output_tensor_names.size()}. If {@code Run} does not return {@code OK()}, the * state of {@code outputs} is undefined. * * REQUIRES: The name of each Tensor of the input or output must * match a "Tensor endpoint" in the {@code GraphDef} passed to {@code Create()}. * * REQUIRES: At least one of {@code output_tensor_names} and * {@code target_node_names} must be non-empty. * * REQUIRES: outputs is not nullptr if {@code output_tensor_names} is non-empty. */ public native @ByVal Status Run(@Const @ByRef StringTensorPairVector inputs, @Const @ByRef StringVector output_tensor_names, @Const @ByRef StringVector target_node_names, TensorVector outputs); /** \brief Implementations which support {@code RunOptions}. */ // /** NOTE: This API is still experimental and may change. */ public native @ByVal Status Create(@Const @ByRef RunOptions run_options, @Const @ByRef GraphDef graph); public native @ByVal Status Extend(@Const @ByRef RunOptions run_options, @Const @ByRef GraphDef graph); public native @ByVal Status Close(@Const @ByRef RunOptions run_options); /** \brief Like {@code Run}, but allows users to pass in a {@code RunOptions} proto and * to retrieve non-Tensor metadata output via a {@code RunMetadata} proto for this * step. {@code run_metadata} may be nullptr, in which case any metadata output is * discarded. * NOTE: This API is still experimental and may change. */ public native @ByVal Status Run(@Const @ByRef RunOptions run_options, @Const @ByRef StringTensorPairVector inputs, @Const @ByRef StringVector output_tensor_names, @Const @ByRef StringVector target_node_names, TensorVector outputs, RunMetadata run_metadata); /** \brief Sets up a graph for partial execution. All future feeds and * fetches are specified by {@code input_names} and {@code output_names}. Returns * {@code handle} that can be used to perform a sequence of partial feeds and * fetches. * NOTE: This API is still experimental and may change. 
*/ public native @ByVal Status PRunSetup(@Const @ByRef StringVector input_names, @Const @ByRef StringVector output_names, @Const @ByRef StringVector target_nodes, @StdString @Cast({"char*", "std::string*"}) BytePointer handle); /** \brief Continues the pending execution specified by {@code handle} with the * provided input tensors and fills {@code outputs} for the endpoints specified * in {@code output_names}. * NOTE: This API is still experimental and may change. */ /// public native @ByVal Status PRun(@StdString BytePointer handle, @Const @ByRef StringTensorPairVector inputs, @Const @ByRef StringVector output_names, TensorVector outputs); public native @ByVal Status PRun(@StdString String handle, @Const @ByRef StringTensorPairVector inputs, @Const @ByRef StringVector output_names, TensorVector outputs); /** \brief List devices in the session. * * Retrieves the list of available devices within the session, and populates * *response. This API is optional. If it is unimplemented, Status will * return a corresponding error message, and *response will be unmodified. */ /// public native @ByVal Status ListDevices(@StdVector DeviceAttributes response); /** \brief Closes this session. * * Closing a session releases the resources used by this session * on the TensorFlow runtime (specified during session creation by * the {@code SessionOptions::target} field). */ public native @ByVal Status Close(); // NOTE(ashankar): As of July 2017, this method was added to facilitate some // experimentation. Reconsider/re-evaluate after September 2017. // // Sets `*output` to the `DeviceMgr` that owns accessible devices in the // address-space of the caller. public native @ByVal Status LocalDeviceManager(@Cast("const tensorflow::DeviceMgr**") PointerPointer output); public native @ByVal Status LocalDeviceManager(@Const @ByPtrPtr DeviceMgr output); /** \brief A handle to a subgraph, created with {@code Session::MakeCallable()}. */ /** \brief Creates a {@code handle} for invoking the subgraph defined by * {@code callable_options}. * NOTE: This API is still experimental and may change. */ /// public native @ByVal Status MakeCallable(@Const @ByRef CallableOptions callable_options, @Cast("tensorflow::Session::CallableHandle*") LongPointer out_handle); public native @ByVal Status MakeCallable(@Const @ByRef CallableOptions callable_options, @Cast("tensorflow::Session::CallableHandle*") LongBuffer out_handle); public native @ByVal Status MakeCallable(@Const @ByRef CallableOptions callable_options, @Cast("tensorflow::Session::CallableHandle*") long... out_handle); /** \brief Invokes the subgraph named by {@code handle} with the given options and * input tensors. * * The order of tensors in {@code feed_tensors} must and {@code fetch_tensors} will * match the order of names in {@code CallableOptions::feed()} and * {@code CallableOptions::fetch()} when this subgraph was created. * NOTE: This API is still experimental and may change. */ public native @ByVal Status RunCallable(@Cast("tensorflow::Session::CallableHandle") long handle, @Const @ByRef TensorVector feed_tensors, TensorVector fetch_tensors, RunMetadata run_metadata); /** \brief Releases resources associated with the given {@code handle} in this * session. * NOTE: This API is still experimental and may change. */ public native @ByVal Status ReleaseCallable(@Cast("tensorflow::Session::CallableHandle") long handle); } /** \brief Create a new session with the given options. 
* * If session creation succeeds, the new {@code Session} will be stored in * {@code *out_session}, the caller will take ownership of the returned * {@code *out_session}, and this function will return {@code OK()}. Otherwise, this * function will return an error status and set *out_session to nullptr. */ /// /// /// /// @Namespace("tensorflow") public static native @ByVal Status NewSession(@Const @ByRef SessionOptions options, @Cast("tensorflow::Session**") PointerPointer out_session); @Namespace("tensorflow") public static native @ByVal Status NewSession(@Const @ByRef SessionOptions options, @ByPtrPtr Session out_session); /** \brief Resets resource containers associated with a target. * * Reset() allows misbehaving or slow sessions to be aborted and closed, and * causes their resources eventually to be released. Reset() does not wait * for the computations in old sessions to cease; it merely starts the * process of tearing them down. However, if a new session is started after * a Reset(), the new session is isolated from changes that old sessions * (started prior to the Reset()) may continue to make to resources, provided * all those resources are in containers listed in "containers". * * Old sessions may continue to have side-effects on resources not in * containers listed in "containers", and thus may affect future * sessions' results in ways that are hard to predict. Thus, if well-defined * behavior is desired, it is recommended that all containers be listed in * "containers". * * {@code containers} is a vector of string representation of resource container * names. When a resource container is reset, the resources held by the * container will be released. In particular, all Variables in the container * will become undefined. If the "containers" vector is empty, the default * container is assumed. If the "containers" vector is non-empty, the * default container should be listed explicitly. * * If Reset succeeds, this function will return {@code OK()}. Otherwise, this * function will return an error status. */ /// /// @Namespace("tensorflow") public static native @ByVal Status Reset(@Const @ByRef SessionOptions options, @Const @ByRef StringVector containers); /** \brief Create a new session with the given options. * * If a new {@code Session} object could not be created, this function will * return nullptr. * * *Strongly prefer* the version of NewSession that returns Status, * which contains more helpful error information. */ @Namespace("tensorflow") public static native Session NewSession(@Const @ByRef SessionOptions options); // end namespace tensorflow // #endif // TENSORFLOW_CORE_PUBLIC_SESSION_H_ // Parsed from tensorflow/core/framework/tensor_slice.pb.h // Generated by the protocol buffer compiler. DO NOT EDIT! // source: tensorflow/core/framework/tensor_slice.proto // #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fslice_2eproto // #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fslice_2eproto // #include // #include // #if GOOGLE_PROTOBUF_VERSION < 3006000 // #error This file was generated by a newer version of protoc which is // #error incompatible with your Protocol Buffer headers. Please update // #error your headers. // #endif // #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION // #error This file was generated by an older version of protoc which is // #error incompatible with your Protocol Buffer headers. Please // #error regenerate this file with a newer version of protoc. 
// #endif // #include // #include // #include // #include // #include // #include // #include // #include // #include // IWYU pragma: export // #include // IWYU pragma: export // #include // @@protoc_insertion_point(includes) // #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fframework_2ftensor_5fslice_2eproto // Internal implementation detail -- do not use these members. // namespace protobuf_tensorflow_2fcore_2fframework_2ftensor_5fslice_2eproto // namespace tensorflow // namespace protobuf // namespace google // =================================================================== @Namespace("tensorflow") @NoOffset public static class TensorSliceProto_Extent extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSliceProto_Extent(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorSliceProto_Extent(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorSliceProto_Extent position(long position) { return (TensorSliceProto_Extent)super.position(position); } public TensorSliceProto_Extent() { super((Pointer)null); allocate(); } private native void allocate(); public TensorSliceProto_Extent(@Const @ByRef TensorSliceProto_Extent from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorSliceProto_Extent from); public native @ByRef @Name("operator =") TensorSliceProto_Extent put(@Const @ByRef TensorSliceProto_Extent from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorSliceProto_Extent default_instance(); /** enum tensorflow::TensorSliceProto_Extent::HasLengthCase */ public static final int kLength = 2, HAS_LENGTH_NOT_SET = 0; public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorSliceProto_Extent internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorSliceProto_Extent other); public native void Swap(TensorSliceProto_Extent other); // implements Message ---------------------------------------------- public native TensorSliceProto_Extent New(); public native TensorSliceProto_Extent New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorSliceProto_Extent from); public native void MergeFrom(@Const @ByRef TensorSliceProto_Extent from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, 
@Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // int64 start = 1; public native void clear_start(); @MemberGetter public static native int kStartFieldNumber(); public static final int kStartFieldNumber = kStartFieldNumber(); public native @Cast("google::protobuf::int64") long start(); public native void set_start(@Cast("google::protobuf::int64") long value); public native void clear_length(); @MemberGetter public static native int kLengthFieldNumber(); public static final int kLengthFieldNumber = kLengthFieldNumber(); public native @Cast("google::protobuf::int64") long length(); public native void set_length(@Cast("google::protobuf::int64") long value); public native void clear_has_length(); public native @Cast("tensorflow::TensorSliceProto_Extent::HasLengthCase") int has_length_case(); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class TensorSliceProto extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSliceProto(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorSliceProto(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorSliceProto position(long position) { return (TensorSliceProto)super.position(position); } public TensorSliceProto() { super((Pointer)null); allocate(); } private native void allocate(); public TensorSliceProto(@Const @ByRef TensorSliceProto from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorSliceProto from); public native @ByRef @Name("operator =") TensorSliceProto put(@Const @ByRef TensorSliceProto from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorSliceProto default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorSliceProto internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorSliceProto other); public native void Swap(TensorSliceProto other); // implements Message ---------------------------------------------- public native TensorSliceProto New(); public native TensorSliceProto New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorSliceProto from); public native void MergeFrom(@Const @ByRef TensorSliceProto from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean 
MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // repeated .tensorflow.TensorSliceProto.Extent extent = 1; public native int extent_size(); public native void clear_extent(); @MemberGetter public static native int kExtentFieldNumber(); public static final int kExtentFieldNumber = kExtentFieldNumber(); public native TensorSliceProto_Extent mutable_extent(int index); public native @Const @ByRef TensorSliceProto_Extent extent(int index); public native TensorSliceProto_Extent add_extent(); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // TensorSliceProto_Extent // int64 start = 1; // int64 length = 2; // ------------------------------------------------------------------- // TensorSliceProto // repeated .tensorflow.TensorSliceProto.Extent extent = 1; // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // PROTOBUF_INCLUDED_tensorflow_2fcore_2fframework_2ftensor_5fslice_2eproto // Parsed from tensorflow/core/framework/tensor_slice.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_SLICE_H_ // #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_SLICE_H_ // #include // #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/tensor_slice.pb.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/lib/gtl/inlined_vector.h" // #include "tensorflow/core/platform/logging.h" // A tensor slice represents a slice of a given tensor. 
It is represented by a // list of (start, length) pairs, where the size of the list is the rank of the // tensor. @Namespace("tensorflow") @NoOffset public static class TensorSlice extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSlice(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorSlice(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorSlice position(long position) { return (TensorSlice)super.position(position); } // Construct a tensor slice: you have a number of ways: // -- creating an empty slice // -- from just a dimension (in this case it will create a full slice) // -- from an array of pairs of integers. // -- from a TensorSliceProto protocol buffer // -- from a string format of "start,length:start,length..." where each // "start,length" pair represents the slice on one dimension. We allow a // special "-" that means "everything for this dimension". One such example // is: 0,10:-:14,1:-:- public TensorSlice() { super((Pointer)null); allocate(); } private native void allocate(); public TensorSlice(int dim) { super((Pointer)null); allocate(dim); } private native void allocate(int dim); public TensorSlice(@Const @ByRef TensorSliceProto proto) { super((Pointer)null); allocate(proto); } private native void allocate(@Const @ByRef TensorSliceProto proto); public static native @ByVal Status Parse(@StdString BytePointer str, TensorSlice output); public static native @ByVal Status Parse(@StdString String str, TensorSlice output); public static native @ByVal TensorSlice ParseOrDie(@StdString BytePointer str); public static native @ByVal TensorSlice ParseOrDie(@StdString String str); public native void Clear(); // Accessors public native int dims(); public native @Cast("tensorflow::int64") long start(int d); public native @Cast("tensorflow::int64") long length(int d); public native @Cast("tensorflow::int64") long end(int d); public native void set_start(int d, @Cast("tensorflow::int64") long x); public native void set_length(int d, @Cast("tensorflow::int64") long x); // If we have a full slice along dimension "d". public native @Cast("bool") boolean IsFullAt(int d); // If this is a full slice, i.e. IsFullAt(d) for every d. public native @Cast("bool") boolean IsFull(); // Set the slice to be a full slice of "dim" dimensions public native void SetFullSlice(int dim); // Extend a slice to "dim" dimensions: all the added dimensions are full. // Requires: dim >= dims(). public native void Extend(int dim); // Conversion of a TensorSlice to other formats public native void AsProto(TensorSliceProto proto); public native @StdString BytePointer DebugString(); // Fill *indices and *sizes from *this (so that we can use the slice() // function in eigen tensor). We need a tensor shape in case some of the // slices are full slices. // We allow NDIMS to be greater than dims(), in which case we will pad the // higher dimensions with trivial dimensions. // Interaction with other TensorSlices. // Compute the intersection with another slice and if "result" is not // nullptr, store the results in *result; returns true if there is any real // intersection. public native @Cast("bool") boolean Intersect(@Const @ByRef TensorSlice other, TensorSlice result); // A short hand. 
public native @Cast("bool") boolean Overlaps(@Const @ByRef TensorSlice other); // Equals iff "*this" and "other" are logically equivalent. public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorSlice other); public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorSlice other); // Interaction with TensorShape. // Slices a shape and stores the result into *result_shape. // Requires that the shape and *this have the same rank. // For example, given a tensor shape of {3, 4, 5}, and a slice of // 1,2:-:0,2, the result shape is {2, 4, 2}. public native @ByVal Status SliceTensorShape(@Const @ByRef TensorShape shape, TensorShape result_shape); // Given slice "sub" where "sub" is fully contained in *this, // (meaning that the intersection of "sub" and *this equals "sub"), computes // the "relative" slice of "sub" with respect to *this. // // In other words, if we use A>S to denote slicing a shape S with a slice A, // then the function is computing a slice X such that: // X > (this > S) = sub > S // for any shape S. // // In general, along every dimension, the start of the relative slice is the // start of the "sub" slice minus the start of *this; the length of the // relative slice is the length of the "sub" slice. // // For example, say we have a shape of {3, 4, 5}, "this" is 0,2:-:1,2, and // "sub" is 1,1:2:2,1,2, then the related slice is 1,1:2,2:0,2. // // The caller needs to make sure that "sub" is indeed a sub-slice of *this; // otherwise the result is undefined. public native void ComputeRelative(@Const @ByRef TensorSlice sub, TensorSlice relative); // Updates the slice in such a way that it fully covers "other" slice. // Note, "other" slice should refer to the same tensor shape. // Example: // given a slice [2:4, :, 3:] and "other" slice [:, 1:4, 2:4] the // updated slice would be [:, :, 2:]. Here is why: // dim 0: "2:4" U ":" -> ":" // dim 1: ":" U "1-4" -> ":" // dim 2: "3:" U "2:4" -> "2:" public native void UpdateToCover(@Const @ByRef TensorSlice other); // Returns true if the length field was specified in an Extent. public static native @Cast("bool") boolean HasExtentLength(@Cast("const tensorflow::TensorSliceProto::Extent*") @ByRef TensorSliceProto_Extent extent); // Returns the value of the length field in an Extent, or -1 if it // is not present. public static native @Cast("tensorflow::int64") long GetExtentLength(@Cast("const tensorflow::TensorSliceProto::Extent*") @ByRef TensorSliceProto_Extent extent); } // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_SLICE_H_ // Parsed from tensorflow/core/util/tensor_slice_set.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // A class to manage slices of a tensor. You can "register" set of slices for a // tensor and then "query" if we have data for a given slice. 
// TODO(yangke): consider moving it to a more private place so that we don't // need to expose the API. // #ifndef TENSORFLOW_UTIL_TENSOR_SLICE_SET_H_ // #define TENSORFLOW_UTIL_TENSOR_SLICE_SET_H_ // #include // for string // #include // #include // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/tensor_slice.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/core/status.h" // for Status // #include "tensorflow/core/lib/core/stringpiece.h" // for StringPiece // #include "tensorflow/core/platform/types.h" // for int64 @Namespace("tensorflow::checkpoint") @NoOffset public static class TensorSliceSet extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSliceSet(Pointer p) { super(p); } public TensorSliceSet(@Const @ByRef TensorShape shape, @Cast("tensorflow::DataType") int type) { super((Pointer)null); allocate(shape, type); } private native void allocate(@Const @ByRef TensorShape shape, @Cast("tensorflow::DataType") int type); public native @Const @ByRef TensorShape shape(); public native @Cast("const tensorflow::DataType") int type(); // Register a new slice for the tensor. The "tag" is an arbitrary string // associated with the slice (in one application it denotes the name of the // file that contains the slice); the "data" points to the data of the tensor // slice (it can be a nullptr). // We don't take the ownership of "data" and the caller needs to make sure // the data is always available during the lifetime of the tensor slice set // if it is not nullptr. public native @ByVal Status Register(@Const @ByRef TensorSlice slice, @StdString BytePointer tag, @Const FloatPointer data); public native @ByVal Status Register(@Const @ByRef TensorSlice slice, @StdString String tag, @Const FloatBuffer data); public native @ByVal Status Register(@Const @ByRef TensorSlice slice, @StdString BytePointer tag, @Const float... data); public native @ByVal Status Register(@Const @ByRef TensorSlice slice, @StdString String tag, @Const FloatPointer data); public native @ByVal Status Register(@Const @ByRef TensorSlice slice, @StdString BytePointer tag, @Const FloatBuffer data); public native @ByVal Status Register(@Const @ByRef TensorSlice slice, @StdString String tag, @Const float... data); // Query about a new slice: checks if we have data for "slice" and if we have // the data and "data" is not nullptr, fill "data" with the slice data. The // caller needs to make sure "data" points to a large enough buffer. // TODO(yangke): avoid unnecessary copying by using a core::RefCounted // pointer. public native @Cast("bool") boolean Query(@Const @ByRef TensorSlice slice, FloatPointer data); public native @Cast("bool") boolean Query(@Const @ByRef TensorSlice slice, FloatBuffer data); public native @Cast("bool") boolean Query(@Const @ByRef TensorSlice slice, float... data); // Alternative way of querying about a new slice: instead of copying the // data, it returns a list of meta data about the stored slices that will // supply data for the slice. public native @Cast("bool") boolean QueryMeta( @Const @ByRef TensorSlice slice, @StdVector TensorSlideStringPair results); public static class SliceInfo extends Pointer { static { Loader.load(); } /** Default native constructor. */ public SliceInfo() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ public SliceInfo(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SliceInfo(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public SliceInfo position(long position) { return (SliceInfo)super.position(position); } public native @ByRef TensorSlice slice(); public native SliceInfo slice(TensorSlice slice); public native @StdString BytePointer tag(); public native SliceInfo tag(BytePointer tag); @MemberGetter public native @Const FloatPointer data(); public native @Cast("tensorflow::int64") long num_floats(); public native SliceInfo num_floats(long num_floats); } // Returns the map from slice string to SliceInfo. public native @Const @ByRef StringSliceInfoMap Slices(); } // Registers "slice" in the TensorSliceSet stored in "tensor_slices", under key // "name". Other arguments are used for validations. Does not modify the map // or its values on non-OK. // REQUIRES: tensor_slices != nullptr @Namespace("tensorflow::checkpoint") public static native @ByVal Status RegisterTensorSlice( @StdString BytePointer name, @Const @ByRef TensorShape shape, @Cast("tensorflow::DataType") int type, @StdString BytePointer tag, @Const @ByRef TensorSlice slice, StringTensorSliceSetMap tensor_slices); @Namespace("tensorflow::checkpoint") public static native @ByVal Status RegisterTensorSlice( @StdString String name, @Const @ByRef TensorShape shape, @Cast("tensorflow::DataType") int type, @StdString String tag, @Const @ByRef TensorSlice slice, StringTensorSliceSetMap tensor_slices); // namespace checkpoint // namespace tensorflow // #endif // TENSORFLOW_UTIL_TENSOR_SLICE_SET_H_ // Parsed from tensorflow/core/util/tensor_slice_util.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_UTIL_TENSOR_SLICE_UTIL_H_ // #define TENSORFLOW_UTIL_TENSOR_SLICE_UTIL_H_ // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/tensor_slice.h" // #include "tensorflow/core/platform/logging.h" // Some hackery to invoke eigen tensor to copy over tensor slices with variable // dimension tensors. // TODO(yangke): get rid of that once the variable dimension tensor support is // in. @Namespace("tensorflow") @MemberGetter public static native int kTensorSliceMaxRank(); public static final int kTensorSliceMaxRank = kTensorSliceMaxRank(); // Create a tensor map with the given shape: we support up to 8 dimensions. If // the shape has fewer than 8 dimensions, we pad the remaining dimensions with 1. // For everything except string, a standard Eigen cast and assignment works. // Eigen makes it extremely difficult to dereference a tensor of string* into // string, so we roll our own loop instead.
@Name("tensorflow::CopyThatWorksWithStringPointer") public static class CopyThatWorksWithStringPointer extends Pointer { static { Loader.load(); } /** Default native constructor. */ public CopyThatWorksWithStringPointer() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public CopyThatWorksWithStringPointer(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CopyThatWorksWithStringPointer(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public CopyThatWorksWithStringPointer position(long position) { return (CopyThatWorksWithStringPointer)super.position(position); } } // Checkpointing of half is done by storing the raw 16 bits as a signed 32bit // integer. To restore the checkpoint we need to do the reverse operation by // reinterpreting the integer as a 16 bit float. This prevents us from using // the default cast operation. // Given a tensor described by "shape", two slices "slice_s" and "slice_d", // and two pointers "ptr_s" and "ptr_d", where "ptr_s" points to a chunk of // memory that stores the data for "slice_s" and "ptr_d" points to a chunk of // memory that stores the data for "slice_d". This function copies the data // that belongs to the intersection of the two slices from slice_s to // slice_d. Uses Tensor cast() to convert from SrcT to DstT. Returns true // iff the two slices share any intersection (and thus some data is copied). // TODO(yangke): figure out if we can make it private. // namespace // namespace tensorflow // #endif // TENSORFLOW_UTIL_TENSOR_SLICE_UTIL_H_ // Parsed from tensorflow/core/util/tensor_slice_reader.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // The utility to read checkpoints for google brain tensor ops and v3 // checkpoints for dist_belief. 
// #ifndef TENSORFLOW_CORE_UTIL_TENSOR_SLICE_READER_H_ // #define TENSORFLOW_CORE_UTIL_TENSOR_SLICE_READER_H_ // #include // #include // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/tensor_slice.h" // #include "tensorflow/core/framework/types.pb.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/lib/gtl/map_util.h" // #include "tensorflow/core/platform/logging.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/mutex.h" // #include "tensorflow/core/platform/protobuf.h" // #include "tensorflow/core/platform/types.h" // #include "tensorflow/core/util/saved_tensor_slice.pb.h" // #include "tensorflow/core/util/saved_tensor_slice_util.h" // #include "tensorflow/core/util/tensor_slice_set.h" // #include "tensorflow/core/util/tensor_slice_util.h" // The reader reads in all the meta data about all the tensor slices. Then it // will try to read the relevant data on-demand to produce the data for the // slices needed. // NOTE(yangke): another way to do this is to first load a list of the tensor // slices needed and then just selectively read some of the meta data. That // might optimize the loading but makes the logic a bit more complicated. We // might want to revisit that. // TODO(yangke): consider moving to TensorProto. @Namespace("tensorflow::checkpoint") @NoOffset public static class TensorSliceReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSliceReader(Pointer p) { super(p); } // Abstract interface for reading data out of a tensor slice checkpoint file public static class Table extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Table(Pointer p) { super(p); } public native @Cast("bool") boolean Get(@StdString BytePointer key, @StdString @Cast({"char*", "std::string*"}) BytePointer value); public native @Cast("bool") boolean Get(@StdString String key, @StdString @Cast({"char*", "std::string*"}) BytePointer value); } @MemberGetter public static native int kLoadAllShards(); public static final int kLoadAllShards = kLoadAllShards(); public TensorSliceReader(@StdString BytePointer filepattern) { super((Pointer)null); allocate(filepattern); } private native void allocate(@StdString BytePointer filepattern); public TensorSliceReader(@StdString String filepattern) { super((Pointer)null); allocate(filepattern); } private native void allocate(@StdString String filepattern); public TensorSliceReader(@StdString BytePointer filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function) { super((Pointer)null); allocate(filepattern, open_function); } private native void allocate(@StdString BytePointer filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function); public TensorSliceReader(@StdString String filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function) { super((Pointer)null); allocate(filepattern, open_function); } private native void allocate(@StdString String filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function); public TensorSliceReader(@StdString BytePointer filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function, int preferred_shard) { super((Pointer)null); allocate(filepattern, open_function, preferred_shard); } private native void allocate(@StdString BytePointer filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function, int preferred_shard); public TensorSliceReader(@StdString String filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function, int preferred_shard) { super((Pointer)null); allocate(filepattern, open_function, preferred_shard); } private native void allocate(@StdString String filepattern, @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::OpenTableFunction*") Pointer open_function, int preferred_shard); // Get the filename this reader is attached to. public native @StdString BytePointer filepattern(); // Get the number of files matched. public native int num_files(); // Get the status of the reader. public native @Const @ByVal Status status(); // Checks if the reader contains any slice of a tensor. In case the reader // does contain the tensor, if "shape" is not nullptr, fill "shape" with the // shape of the tensor; if "type" is not nullptr, fill "type" with the type // of the tensor. public native @Cast("bool") boolean HasTensor(@StdString BytePointer name, TensorShape shape, @Cast("tensorflow::DataType*") IntPointer type); public native @Cast("bool") boolean HasTensor(@StdString String name, TensorShape shape, @Cast("tensorflow::DataType*") IntPointer type); // Checks if the reader contains all the data about a tensor slice, and if // yes, copies the data of the slice to "data". The caller needs to make sure // that "data" points to a buffer that holds enough data. // This is a slow function since it needs to read sstables. // Get the tensors. 
public native @Const @ByRef StringTensorSliceSetMap Tensors(); // Returns the value for one tensor. Only single-slice checkpoints are supported // at the moment. public native @ByVal Status GetTensor(@StdString BytePointer name, @UniquePtr Tensor out_tensor); public native @ByVal Status GetTensor(@StdString String name, @UniquePtr Tensor out_tensor); // Returns a map from tensor name to shape. public native @ByVal VarToShapeMap GetVariableToShapeMap(); // Returns a map from tensor name to data type. public native @ByVal @Cast("tensorflow::checkpoint::TensorSliceReader::VarToDataTypeMap*") VarToShapeMap GetVariableToDataTypeMap(); // Returns a string containing names and shapes of all the tensors. public native @StdString BytePointer DebugString(); } @Namespace("tensorflow::checkpoint") public static native @ByVal Status OpenTableTensorSliceReader(@StdString BytePointer fname, @Cast("tensorflow::checkpoint::TensorSliceReader::Table**") PointerPointer table); @Namespace("tensorflow::checkpoint") public static native @ByVal Status OpenTableTensorSliceReader(@StdString BytePointer fname, @ByPtrPtr TensorSliceReader.Table table); @Namespace("tensorflow::checkpoint") public static native @ByVal Status OpenTableTensorSliceReader(@StdString String fname, @ByPtrPtr TensorSliceReader.Table table); // namespace checkpoint // namespace tensorflow // #endif // TENSORFLOW_CORE_UTIL_TENSOR_SLICE_READER_H_
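// NOTE: Illustrative usage sketch added for documentation; it is not part of
// the generated bindings. The checkpoint path and variable name are
// hypothetical, and the @UniquePtr mapping of GetTensor()'s out parameter is
// assumed to accept a default-constructed Tensor.
public static void tensorSliceReaderExample() {
    TensorSliceReader reader = new TensorSliceReader("/tmp/my_checkpoint");
    if (!reader.status().ok()) {
        System.err.println(reader.status().error_message().getString());
        return;
    }
    System.out.println(reader.DebugString().getString()); // names and shapes
    TensorShape shape = new TensorShape();
    IntPointer dtype = new IntPointer(1);
    if (reader.HasTensor("my_var", shape, dtype)) {
        Tensor restored = new Tensor();
        reader.GetTensor("my_var", restored); // single-slice checkpoints only
    }
}

// Parsed from tensorflow/core/util/tensor_bundle/tensor_bundle.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // A tensor bundle is a set of immutable persistent files storing a set of named // tensors. It is designed for checkpointing TensorFlow tensors. // // The paths of the managed files share a common prefix; e.g., with the prefix: // /fs/model/train/ckpt-step/ckpt // // the bundle may contain a metadata file, and sharded data files: // /fs/model/train/ckpt-step/ // ckpt.index // ckpt.data-00000-of-00020 // ckpt.data-00001-of-00020 // ... // ckpt.data-00019-of-00020 // // The ".index" file is a string-string immutable table // (tensorflow::table::Table). Each key is a name of a tensor and its value is // a serialized BundleEntryProto. Each BundleEntryProto describes the metadata // of a tensor: which of the "data" files contains the content of a tensor, the // offset into that file, checksum, some auxiliary data, etc. // // A tensor bundle can be accessed randomly using a BundleReader. Usage: // // BundleReader reader(env, "/fs/model/train/ckpt-step/ckpt"); // reader.Lookup("name", &tensor); // // A tensor bundle can be built using BundleWriter. Each BundleWriter builds a // single data file bundle. Multiple bundles can then be merged by // MergeBundles() without reading and writing large chunks of data: it reads the // metadata files and outputs a single merged metadata file.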
Typical usage: // // worker 0: // BundleWriter writer(env, "/fs/model/train/ckpt-step/tmp/worker0-step"); // writer.Add(...); // Adds the tensors on this worker. // writer.Finish(); // Flushes. // worker 1: // BundleWriter writer(env, "/fs/model/train/ckpt-step/tmp/worker1-step"); // writer.Add(...); // writer.Finish(); // worker 2: // MergeBundles(env, // {"/fs/model/train/ckpt-step/tmp/worker0-step", // "/fs/model/train/ckpt-step/tmp/worker1-step"}, // "/fs/model/train/ckpt-step/ckpt" /* merged prefix */); // // #ifndef TENSORFLOW_CORE_UTIL_TENSOR_BUNDLE_TENSOR_BUNDLE_H_ // #define TENSORFLOW_CORE_UTIL_TENSOR_BUNDLE_TENSOR_BUNDLE_H_ // #include "tensorflow/core/protobuf/tensor_bundle.pb.h" // #include // #include // #include // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/tensor_slice.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/gtl/array_slice.h" // #include "tensorflow/core/lib/io/inputbuffer.h" // #include "tensorflow/core/lib/io/table.h" // #include "tensorflow/core/platform/env.h" // #include "tensorflow/core/platform/file_system.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/types.h" // #include "tensorflow/core/util/tensor_bundle/naming.h" // #include "tensorflow/core/util/tensor_slice_set.h" // Versioning of the tensor bundle format. // Follows the same rules as 3p/tf/core/public/version.h. // // History: // 0. Any tensor bundles produced before this field was added. // 1. Added this field (2016-09-14). @Namespace("tensorflow") @MemberGetter public static native int kTensorBundleMinProducer(); @Namespace("tensorflow") @MemberGetter public static native int kTensorBundleMinConsumer(); @Namespace("tensorflow") @MemberGetter public static native int kTensorBundleVersion(); // The empty string, hence always the first key in the metadata table. Its // corresponding value is a BundleHeaderProto. @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kHeaderEntryKey(); // Builds a string-string table of tensor names to BundleEntryProto (metadata). // // On construction, attempts to create a directory given by the dirname of // "prefix", so "status()" must be checked before calling any member functions. // // All threads accessing the same BundleWriter must synchronize. @Namespace("tensorflow") @NoOffset public static class BundleWriter extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BundleWriter(Pointer p) { super(p); } @NoOffset public static class Options extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Options(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Options(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public Options position(long position) { return (Options)super.position(position); } public Options() { super((Pointer)null); allocate(); } private native void allocate(); // Alignment, in bytes, for tensor data. // Must be >= 1. The default size of 1 densely packs tensors. 
public native int data_alignment(); public native Options data_alignment(int data_alignment); } public BundleWriter(Env env, @StringPiece BytePointer prefix, @Const @ByRef(nullValue = "tensorflow::BundleWriter::Options()") Options options) { super((Pointer)null); allocate(env, prefix, options); } private native void allocate(Env env, @StringPiece BytePointer prefix, @Const @ByRef(nullValue = "tensorflow::BundleWriter::Options()") Options options); public BundleWriter(Env env, @StringPiece BytePointer prefix) { super((Pointer)null); allocate(env, prefix); } private native void allocate(Env env, @StringPiece BytePointer prefix); public BundleWriter(Env env, @StringPiece String prefix, @Const @ByRef(nullValue = "tensorflow::BundleWriter::Options()") Options options) { super((Pointer)null); allocate(env, prefix, options); } private native void allocate(Env env, @StringPiece String prefix, @Const @ByRef(nullValue = "tensorflow::BundleWriter::Options()") Options options); public BundleWriter(Env env, @StringPiece String prefix) { super((Pointer)null); allocate(env, prefix); } private native void allocate(Env env, @StringPiece String prefix); // Adds the tensor "val" under key "key". // Across calls, "key" must be unique but keys can be added in any order. public native @ByVal Status Add(@StringPiece BytePointer key, @Const @ByRef Tensor val); public native @ByVal Status Add(@StringPiece String key, @Const @ByRef Tensor val); // Partitioned variables support. // A slice of a full tensor is stored in two entries in the metadata table: // // full_tensor_key -> BundleEntryProto, describing all stored slices // of this full tensor. Does not append to the data // file. // encoded slice key -> BundleEntryProto, describing one particular slice. // Appends values of this slice to the data file. // // Slices of a full tensor can be added in any order. // // If a full tensor has slices placed on N devices and N BundleWriters are // concurrently used, the caller must use MergeBundles() to ensure that a // consistent entry for "full_tensor_key" is produced. // // Returns an error if the same slice is added a second time. public native @ByVal Status AddSlice(@StringPiece BytePointer full_tensor_key, @Const @ByRef TensorShape full_tensor_shape, @Const @ByRef TensorSlice slice_spec, @Const @ByRef Tensor slice_tensor); public native @ByVal Status AddSlice(@StringPiece String full_tensor_key, @Const @ByRef TensorShape full_tensor_shape, @Const @ByRef TensorSlice slice_spec, @Const @ByRef Tensor slice_tensor); // Finishes the writer and flushes. public native @ByVal Status Finish(); public native @ByVal Status status(); }
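// NOTE: Illustrative usage sketch added for documentation; it is not part of
// the generated bindings. Writes a one-tensor bundle, merges it under a new
// prefix, and reads it back with the BundleReader declared below. All paths
// and the "weights" key are hypothetical.
public static void tensorBundleExample() {
    Env env = Env.Default();

    Tensor weights = new Tensor(DT_FLOAT, new TensorShape(2, 3));
    FloatBuffer data = weights.createBuffer();
    for (int i = 0; i < 6; i++) data.put(i, i);

    BundleWriter writer = new BundleWriter(env, "/tmp/bundle/worker0");
    writer.Add("weights", weights);
    Status s = writer.Finish();

    if (s.ok()) s = MergeBundles(env, new StringVector("/tmp/bundle/worker0"), "/tmp/bundle/ckpt");

    BundleReader reader = new BundleReader(env, "/tmp/bundle/ckpt");
    if (s.ok() && reader.status().ok() && reader.Contains("weights")) {
        // The destination Tensor must already have the right dtype and shape.
        IntPointer dtype = new IntPointer(1);
        TensorShape shape = new TensorShape();
        reader.LookupDtypeAndShape("weights", dtype, shape);
        Tensor restored = new Tensor(dtype.get(), shape);
        reader.Lookup("weights", restored);
    }
}

// Merges a set of bundles (given their prefixes) into a single bundle with the // given "merged_prefix". The merged metadata is guaranteed to be consistent. // // If there are N bundles in "prefixes", during the merge the data files will be // renamed to contain a proper sharded file spec, with num_shards set to the sum // of num_shards across the N input bundles. // // The caller should only rely on the metadata file of the merged bundle to // query information about a tensor. In particular, this function does not // guarantee not to re-order the input data files. // // Once merged, makes a best effort to delete the old metadata files. // Returns OK iff all bundles are successfully merged.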
@Namespace("tensorflow") public static native @ByVal Status MergeBundles(Env env, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector prefixes, @StringPiece BytePointer merged_prefix); @Namespace("tensorflow") public static native @ByVal Status MergeBundles(Env env, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector prefixes, @StringPiece String merged_prefix); // On construction, silently attempts to read the metadata associated with // "prefix". If caller intends to call any function afterwards, "status()" // must be checked. // All threads accessing the same BundleReader must synchronize. @Namespace("tensorflow") @NoOffset public static class BundleReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BundleReader(Pointer p) { super(p); } public BundleReader(Env env, @StringPiece BytePointer prefix) { super((Pointer)null); allocate(env, prefix); } private native void allocate(Env env, @StringPiece BytePointer prefix); public BundleReader(Env env, @StringPiece String prefix) { super((Pointer)null); allocate(env, prefix); } private native void allocate(Env env, @StringPiece String prefix); // Is ok() iff the reader construction is successful (completed the read of // the metadata). public native @ByVal Status status(); // Queries whether the bundle contains an entry keyed by "key". Calls Seek() // internally, so this call invalidates the reader's current position. // REQUIRES: status().ok() public native @Cast("bool") boolean Contains(@StringPiece BytePointer key); public native @Cast("bool") boolean Contains(@StringPiece String key); // Looks up the dtype and the shape of the tensor keyed by "key". // REQUIRES: status().ok() public native @ByVal Status LookupDtypeAndShape(@StringPiece BytePointer key, @Cast("tensorflow::DataType*") IntPointer dtype, TensorShape shape); public native @ByVal Status LookupDtypeAndShape(@StringPiece String key, @Cast("tensorflow::DataType*") IntPointer dtype, TensorShape shape); // Looks up the shape of the tensor keyed by "key". // Clears "shape" if not found. // REQUIRES: status().ok() public native @ByVal Status LookupTensorShape(@StringPiece BytePointer key, TensorShape shape); public native @ByVal Status LookupTensorShape(@StringPiece String key, TensorShape shape); // Looks up the tensor keyed by "key". If "key" refers to a partitioned // tensor, attempts to look up the full contents using all stored slices. // // Caller must make sure "val" has the same shape and dtype as the // corresponding contents, so that its buffer can be filled without needing // extra allocation. These can be queried via "LookupDtypeAndShape()". // // On error, "val" may contain nonsense data. Returns a NotFound error if // tensor keyed by "key" does not exist in this bundle. // // Validates the stored crc32c checksum against the restored bytes. // REQUIRES: status().ok() public native @ByVal Status Lookup(@StringPiece BytePointer key, Tensor val); public native @ByVal Status Lookup(@StringPiece String key, Tensor val); // Looks up the tensor pointed to by the internal iterator. // // On error, "val" may contain nonsense data. // // Validates the stored crc32c checksum against the restored bytes. // REQUIRES: status().ok() && Valid() public native @ByVal Status ReadCurrent(Tensor val); // Looks up the slices of the tensor keyed by "key". On OK, "slices" // is non-empty if and only if the tensor is a partitioned tensor. 
// // Warning - there is no guaranteed ordering for the returned slices, so // a slice with a larger start index in some dimension could come before // another slice with a smaller start index in the same dimension. // REQUIRES: status().ok() public native @ByVal Status LookupTensorSlices(@StringPiece BytePointer key, @StdVector TensorSlice slices); public native @ByVal Status LookupTensorSlices(@StringPiece String key, @StdVector TensorSlice slices); // Looks up a specific slice of a partitioned tensor. // It is only required that the stored slices cover the requested slice, // namely "slice_spec" is a subset of the union of the stored slices. // REQUIRES: status().ok() public native @ByVal Status LookupSlice(@StringPiece BytePointer full_tensor_key, @Const @ByRef TensorSlice slice_spec, Tensor val); public native @ByVal Status LookupSlice(@StringPiece String full_tensor_key, @Const @ByRef TensorSlice slice_spec, Tensor val); // Seeks to the first position in the bundle whose key is no less than "key". // REQUIRES: status().ok() public native void Seek(@StringPiece BytePointer key); public native void Seek(@StringPiece String key); // Moves to the next position in the bundle. // REQUIRES: status().ok() public native void Next(); // Returns true iff the reader is positioned to a key/val pair. // REQUIRES: status().ok() public native @Cast("bool") boolean Valid(); // Returns the key at the current position. // REQUIRES: status().ok() && Valid() public native @StringPiece BytePointer key(); // Returns the raw value at the current position. // REQUIRES: status().ok() && Valid() public native @StringPiece BytePointer value(); public native @StdString BytePointer DebugString(); } // A buffering wrapper for a WritableFile. Useful if the caller wishes to issue // small writes to a file (e.g. writing out a list of small varints). // External synchronization must be used in the presence of concurrent callers. @Namespace("tensorflow") @NoOffset public static class FileOutputBuffer extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FileOutputBuffer(Pointer p) { super(p); } public FileOutputBuffer(WritableFile file, @Cast("size_t") long buffer_size) { super((Pointer)null); allocate(file, buffer_size); } private native void allocate(WritableFile file, @Cast("size_t") long buffer_size); // Buffered append. public native @ByVal Status Append(@StringPiece BytePointer data); public native @ByVal Status Append(@StringPiece String data); // Returns the running crc32c checksum of all currently appended bytes. public native int crc32c(); // Clears the running crc32c checksum. public native void clear_crc32c(); // Appends the buffered data, then closes the underlying file. public native @ByVal Status Close(); } // namespace tensorflow // #endif // TENSORFLOW_CORE_UTIL_TENSOR_BUNDLE_TENSOR_BUNDLE_H_ // Parsed from tensorflow/c/tf_status_helper.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ // #ifndef TENSORFLOW_C_TF_STATUS_HELPER_H_ // #define TENSORFLOW_C_TF_STATUS_HELPER_H_ // #include "tensorflow/c/c_api.h" // #include "tensorflow/core/lib/core/status.h" // Set the attribute of "tf_status" from the attributes of "status". @Namespace("tensorflow") public static native void Set_TF_Status_from_Status(TF_Status tf_status, @Const @ByRef Status status); // Returns a "status" from "tf_status". @Namespace("tensorflow") public static native @ByVal Status StatusFromTF_Status(@Const TF_Status tf_status); // namespace tensorflow // #endif // TENSORFLOW_C_TF_STATUS_HELPER_H_ // Parsed from tensorflow/c/checkpoint_reader.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_C_CHECKPOINT_READER_H_ // #define TENSORFLOW_C_CHECKPOINT_READER_H_ // #include // #include // #include "tensorflow/c/tf_status_helper.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/platform/types.h" // #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h" // #include "tensorflow/core/util/tensor_slice_reader.h" // A wrapper around BundleReader (for V2 checkpoints) and // checkpoint::TensorSliceReader (for V1), that is more easily SWIG wrapped for // other languages. // // The class currently only interacts with single-slice (i.e., non-partitioned) // variables. @Namespace("tensorflow::checkpoint") @NoOffset public static class CheckpointReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CheckpointReader(Pointer p) { super(p); } public CheckpointReader(@StdString BytePointer filepattern, TF_Status out_status) { super((Pointer)null); allocate(filepattern, out_status); } private native void allocate(@StdString BytePointer filepattern, TF_Status out_status); public CheckpointReader(@StdString String filepattern, TF_Status out_status) { super((Pointer)null); allocate(filepattern, out_status); } private native void allocate(@StdString String filepattern, TF_Status out_status); public native @Cast("bool") boolean HasTensor(@StdString BytePointer name); public native @Cast("bool") boolean HasTensor(@StdString String name); public native @StdString BytePointer DebugString(); // Returns a map from variable names to their shapes. Slices of a partitioned // tensor are combined into a single entry. public native @Const @ByRef VarToShapeMap GetVariableToShapeMap(); // Returns a map from variable names to their data types. Slices of a // partitioned tensor are combined into a single entry. public native @Cast("const tensorflow::checkpoint::TensorSliceReader::VarToDataTypeMap*") @ByRef VarToShapeMap GetVariableToDataTypeMap(); // Attempts to look up the tensor named "name" and stores the found result in // "out_tensor". 
public native void GetTensor(@StdString BytePointer name, @UniquePtr Tensor out_tensor, TF_Status out_status); public native void GetTensor(@StdString String name, @UniquePtr Tensor out_tensor, TF_Status out_status); } // namespace checkpoint // namespace tensorflow // #endif // TENSORFLOW_C_CHECKPOINT_READER_H_
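// NOTE: Illustrative usage sketch added for documentation; it is not part of
// the generated bindings. The checkpoint path and variable name are
// hypothetical; TF_NewStatus()/TF_GetCode()/TF_Message()/TF_DeleteStatus()
// are wrapped further below in this file.
public static void checkpointReaderExample() {
    TF_Status status = TF_NewStatus();
    CheckpointReader reader = new CheckpointReader("/tmp/model.ckpt", status);
    if (TF_GetCode(status) == TF_OK && reader.HasTensor("my_var")) {
        Tensor value = new Tensor();
        reader.GetTensor("my_var", value, status);
        if (TF_GetCode(status) != TF_OK) {
            System.err.println(TF_Message(status).getString());
        }
    }
    TF_DeleteStatus(status);
}

// Parsed from tensorflow/c/c_api.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_C_C_API_H_ // #define TENSORFLOW_C_C_API_H_ // #include // #include // -------------------------------------------------------------------------- // C API for TensorFlow. // // The API leans towards simplicity and uniformity instead of convenience // since most usage will be by language-specific wrappers. // // Conventions: // * We use the prefix TF_ for everything in the API. // * Objects are always passed around as pointers to opaque structs // and these structs are allocated/deallocated via the API. // * TF_Status holds error information. It is an object type // and therefore is passed around as a pointer to an opaque // struct as mentioned above. // * Every call that has a TF_Status* argument clears it on success // and fills it with error info on failure. // * unsigned char is used for booleans (instead of the 'bool' type). // In C++ bool is a keyword while in C99 bool is a macro defined // in stdbool.h. It is possible for the two to be inconsistent. // For example, neither the C99 nor the C++11 standard force a byte // size on the bool type, so the macro defined in stdbool.h could // be inconsistent with the bool keyword in C++. Thus, the use // of stdbool.h is avoided and unsigned char is used instead. // * size_t is used to represent byte sizes of objects that are // materialized in the address space of the calling process. // * int is used as an index into arrays. // * Deletion functions are safe to call on nullptr. // // Questions left to address: // * Might at some point need a way for callers to provide their own Env. // * Maybe add TF_TensorShape that encapsulates dimension info. // // Design decisions made: // * Backing store for tensor memory has an associated deallocation // function. This deallocation function will point to client code // for tensors populated by the client. So the client can do things // like shadowing a numpy array. // * We do not provide TF_OK since it is not strictly necessary and we // are not optimizing for convenience. // * We make the assumption that one session has one graph. This should be // fine since we have the ability to run sub-graphs. // * We could allow NULL for some arguments (e.g., NULL options arg). // However since convenience is not a primary goal, we don't do this. // * Devices are not in this API. Instead, they are created/used internally // and the API just provides high level controls over the number of // devices of each type.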
// Macro to control visibility of exported symbols in the shared library (.so, // .dylib, .dll). // This duplicates the TF_EXPORT macro definition in // tensorflow/core/platform/macros.h in order to keep this .h file independent // of any other includes. // #ifdef SWIG // #define TF_CAPI_EXPORT // #else // #endif // SWIG // #ifdef __cplusplus // #endif // -------------------------------------------------------------------------- // TF_Version returns a string describing version information of the // TensorFlow library. TensorFlow uses semantic versioning. public static native @Cast("const char*") BytePointer TF_Version(); // -------------------------------------------------------------------------- // TF_DataType holds the type for a scalar value. E.g., one slot in a tensor. // The enum values here are identical to corresponding values in types.proto. /** enum TF_DataType */ public static final int TF_FLOAT = 1, TF_DOUBLE = 2, TF_INT32 = 3, // Int32 tensors are always in 'host' memory. TF_UINT8 = 4, TF_INT16 = 5, TF_INT8 = 6, TF_STRING = 7, TF_COMPLEX64 = 8, // Single-precision complex TF_COMPLEX = 8, // Old identifier kept for API backwards compatibility TF_INT64 = 9, TF_BOOL = 10, TF_QINT8 = 11, // Quantized int8 TF_QUINT8 = 12, // Quantized uint8 TF_QINT32 = 13, // Quantized int32 TF_BFLOAT16 = 14, // Float32 truncated to 16 bits. Only for cast ops. TF_QINT16 = 15, // Quantized int16 TF_QUINT16 = 16, // Quantized uint16 TF_UINT16 = 17, TF_COMPLEX128 = 18, // Double-precision complex TF_HALF = 19, TF_RESOURCE = 20, TF_VARIANT = 21, TF_UINT32 = 22, TF_UINT64 = 23; // TF_DataTypeSize returns the sizeof() for the underlying type corresponding // to the given TF_DataType enum value. Returns 0 for variable length types // (e.g. TF_STRING) or on failure. public static native @Cast("size_t") long TF_DataTypeSize(@Cast("TF_DataType") int dt); // -------------------------------------------------------------------------- // TF_Code holds an error code. The enum values here are identical to // corresponding values in error_codes.proto. /** enum TF_Code */ public static final int TF_OK = 0, TF_CANCELLED = 1, TF_UNKNOWN = 2, TF_INVALID_ARGUMENT = 3, TF_DEADLINE_EXCEEDED = 4, TF_NOT_FOUND = 5, TF_ALREADY_EXISTS = 6, TF_PERMISSION_DENIED = 7, TF_UNAUTHENTICATED = 16, TF_RESOURCE_EXHAUSTED = 8, TF_FAILED_PRECONDITION = 9, TF_ABORTED = 10, TF_OUT_OF_RANGE = 11, TF_UNIMPLEMENTED = 12, TF_INTERNAL = 13, TF_UNAVAILABLE = 14, TF_DATA_LOSS = 15; // -------------------------------------------------------------------------- // TF_Status holds error information. It either has an OK code, or // else an error code with an associated error message. // Return a new status object. public static native TF_Status TF_NewStatus(); // Delete a previously created status object. public static native void TF_DeleteStatus(TF_Status arg0); // Record <code, msg> in *s. Any previous information is lost. // A common use is to clear a status: TF_SetStatus(s, TF_OK, ""); public static native void TF_SetStatus(TF_Status s, @Cast("TF_Code") int code, @Cast("const char*") BytePointer msg); public static native void TF_SetStatus(TF_Status s, @Cast("TF_Code") int code, String msg); // Return the code recorded in *s. public static native @Cast("TF_Code") int TF_GetCode(@Const TF_Status s);
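// NOTE: Illustrative usage sketch added for documentation; it is not part of
// the generated bindings. Exercises only the TF_Status functions declared in
// this section.
public static void tfStatusExample() {
    TF_Status s = TF_NewStatus();
    TF_SetStatus(s, TF_INVALID_ARGUMENT, "bad input");
    if (TF_GetCode(s) != TF_OK) {
        System.err.println(TF_Message(s).getString()); // prints "bad input"
    }
    TF_SetStatus(s, TF_OK, ""); // clear the status
    TF_DeleteStatus(s);
}

// Return a pointer to the (null-terminated) error message in *s. The // return value points to memory that is only usable until the next // mutation to *s. Always returns an empty string if TF_GetCode(s) is // TF_OK.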
// --------------------------------------------------------------------------
// TF_Buffer holds a pointer to a block of data and its associated length.
// Typically, the data consists of a serialized protocol buffer, but other data
// may also be held in a buffer.
//
// By default, TF_Buffer itself does not do any memory management of the
// pointed-to block.  If need be, users of this struct should specify how to
// deallocate the block by setting the `data_deallocator` function pointer.
public static class TF_Buffer extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_Buffer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_Buffer() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Buffer(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Buffer(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Buffer position(long position) {
        return (TF_Buffer)super.position(position);
    }

    public native @Const Pointer data(); public native TF_Buffer data(Pointer data);
    public native @Cast("size_t") long length(); public native TF_Buffer length(long length);
    public static class Data_deallocator_Pointer_long extends FunctionPointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public    Data_deallocator_Pointer_long(Pointer p) { super(p); }
        protected Data_deallocator_Pointer_long() { allocate(); }
        private native void allocate();
        public native void call(Pointer data, @Cast("size_t") long length);
    }
    public native Data_deallocator_Pointer_long data_deallocator();
    public native TF_Buffer data_deallocator(Data_deallocator_Pointer_long data_deallocator);
}

// Makes a copy of the input and sets an appropriate deallocator.  Useful for
// passing in read-only, input protobufs.
public static native TF_Buffer TF_NewBufferFromString(@Const Pointer proto, @Cast("size_t") long proto_len);

// Useful for passing *out* a protobuf.
public static native TF_Buffer TF_NewBuffer();

public static native void TF_DeleteBuffer(TF_Buffer arg0);

public static native @ByVal TF_Buffer TF_GetBuffer(TF_Buffer buffer);
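// For example, wrapping a serialized protocol buffer from Java might look
// like this sketch (assuming `bytes` holds the serialized proto):
//   BytePointer data = new BytePointer(bytes);
//   TF_Buffer buf = TF_NewBufferFromString(data, bytes.length);
//   // ... pass buf to an API call ...
//   TF_DeleteBuffer(buf);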
// --------------------------------------------------------------------------
// TF_Tensor holds a multi-dimensional array of elements of a single data type.
// For all types other than TF_STRING, the data buffer stores elements
// in row major order.  E.g. if data is treated as a vector of TF_DataType:
//
//   element 0:   index (0, ..., 0)
//   element 1:   index (0, ..., 1)
//   ...
//
// The format for TF_STRING tensors is:
//   start_offset: array[uint64]
//   data:         byte[...]
//
//   The string length (as a varint), followed by the contents of the string
//   is encoded at data[start_offset[i]].  TF_StringEncode and TF_StringDecode
//   facilitate this encoding.

// Return a new tensor that holds the bytes data[0,len-1].
//
// The data will be deallocated by a subsequent call to TF_DeleteTensor via:
//      (*deallocator)(data, len, deallocator_arg)
// Clients must provide a custom deallocator function so they can pass in
// memory managed by something like numpy.
//
// May return NULL (and invoke the deallocator) if the provided data buffer
// (data, len) is inconsistent with a tensor of the given TF_DataType
// and the shape specified by (dims, num_dims).
public static class Deallocator_Pointer_long_Pointer extends FunctionPointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public    Deallocator_Pointer_long_Pointer(Pointer p) { super(p); }
    protected Deallocator_Pointer_long_Pointer() { allocate(); }
    private native void allocate();
    public native void call(Pointer data, @Cast("size_t") long len, Pointer arg);
}
public static native TF_Tensor TF_NewTensor(@Cast("TF_DataType") int arg0, @Cast("const int64_t*") LongPointer dims, int num_dims, Pointer data, @Cast("size_t") long len, Deallocator_Pointer_long_Pointer deallocator, Pointer deallocator_arg);
public static native TF_Tensor TF_NewTensor(@Cast("TF_DataType") int arg0, @Cast("const int64_t*") LongBuffer dims, int num_dims, Pointer data, @Cast("size_t") long len, Deallocator_Pointer_long_Pointer deallocator, Pointer deallocator_arg);
public static native TF_Tensor TF_NewTensor(@Cast("TF_DataType") int arg0, @Cast("const int64_t*") long[] dims, int num_dims, Pointer data, @Cast("size_t") long len, Deallocator_Pointer_long_Pointer deallocator, Pointer deallocator_arg);

// Allocate and return a new Tensor.
//
// This function is an alternative to TF_NewTensor and should be used when
// memory is allocated to pass the Tensor to the C API.  The allocated memory
// satisfies TensorFlow's memory alignment preferences and should be preferred
// over calling malloc and free.
//
// The caller must set the Tensor values by writing them to the pointer returned
// by TF_TensorData with length TF_TensorByteSize.
public static native TF_Tensor TF_AllocateTensor(@Cast("TF_DataType") int arg0, @Cast("const int64_t*") LongPointer dims, int num_dims, @Cast("size_t") long len);
public static native TF_Tensor TF_AllocateTensor(@Cast("TF_DataType") int arg0, @Cast("const int64_t*") LongBuffer dims, int num_dims, @Cast("size_t") long len);
public static native TF_Tensor TF_AllocateTensor(@Cast("TF_DataType") int arg0, @Cast("const int64_t*") long[] dims, int num_dims, @Cast("size_t") long len);

// Deletes `tensor` and returns a new TF_Tensor with the same content if
// possible.  Returns nullptr and leaves `tensor` untouched if not.
public static native TF_Tensor TF_TensorMaybeMove(TF_Tensor tensor);

// Destroy a tensor.
public static native void TF_DeleteTensor(TF_Tensor arg0);

// Return the type of a tensor element.
public static native @Cast("TF_DataType") int TF_TensorType(@Const TF_Tensor arg0);

// Return the number of dimensions that the tensor has.
public static native int TF_NumDims(@Const TF_Tensor arg0);

// Return the length of the tensor in the "dim_index" dimension.
// REQUIRES: 0 <= dim_index < TF_NumDims(tensor)
public static native @Cast("int64_t") long TF_Dim(@Const TF_Tensor tensor, int dim_index);

// Return the size of the underlying data in bytes.
public static native @Cast("size_t") long TF_TensorByteSize(@Const TF_Tensor arg0);

// Return a pointer to the underlying data buffer.
public static native Pointer TF_TensorData(@Const TF_Tensor arg0);
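// For example, allocating and filling a 2x3 float tensor from Java might
// look like this sketch:
//   long[] dims = {2, 3};
//   long bytes = 2 * 3 * TF_DataTypeSize(TF_FLOAT);
//   TF_Tensor t = TF_AllocateTensor(TF_FLOAT, dims, dims.length, bytes);
//   FloatPointer data = new FloatPointer(TF_TensorData(t));
//   for (int i = 0; i < 6; i++) data.put(i, (float)i);
//   // ... use t ...
//   TF_DeleteTensor(t);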
// --------------------------------------------------------------------------
// Encode the string `src` (`src_len` bytes long) into `dst` in the format
// required by TF_STRING tensors.  Does not write to memory more than `dst_len`
// bytes beyond `*dst`.  `dst_len` should be at least
// TF_StringEncodedSize(src_len).
//
// On success returns the size in bytes of the encoded string.
// Returns an error into `status` otherwise.
public static native @Cast("size_t") long TF_StringEncode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("char*") BytePointer dst, @Cast("size_t") long dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringEncode(String src, @Cast("size_t") long src_len, @Cast("char*") ByteBuffer dst, @Cast("size_t") long dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringEncode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("char*") byte[] dst, @Cast("size_t") long dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringEncode(String src, @Cast("size_t") long src_len, @Cast("char*") BytePointer dst, @Cast("size_t") long dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringEncode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("char*") ByteBuffer dst, @Cast("size_t") long dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringEncode(String src, @Cast("size_t") long src_len, @Cast("char*") byte[] dst, @Cast("size_t") long dst_len, TF_Status status);

// Decode a string encoded using TF_StringEncode.
//
// On success, sets `*dst` to the start of the decoded string and `*dst_len` to
// its length.  Returns the number of bytes starting at `src` consumed while
// decoding.  `*dst` points to memory within the encoded buffer.  On failure,
// `*dst` and `*dst_len` are undefined and an error is set in `status`.
//
// Does not read memory more than `src_len` bytes beyond `src`.
public static native @Cast("size_t") long TF_StringDecode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("const char**") PointerPointer dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringDecode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("const char**") @ByPtrPtr BytePointer dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringDecode(String src, @Cast("size_t") long src_len, @Cast("const char**") @ByPtrPtr ByteBuffer dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringDecode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("const char**") @ByPtrPtr byte[] dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringDecode(String src, @Cast("size_t") long src_len, @Cast("const char**") @ByPtrPtr BytePointer dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringDecode(@Cast("const char*") BytePointer src, @Cast("size_t") long src_len, @Cast("const char**") @ByPtrPtr ByteBuffer dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);
public static native @Cast("size_t") long TF_StringDecode(String src, @Cast("size_t") long src_len, @Cast("const char**") @ByPtrPtr byte[] dst, @Cast("size_t*") SizeTPointer dst_len, TF_Status status);

// Return the size in bytes required to encode a string `len` bytes long into a
// TF_STRING tensor.
public static native @Cast("size_t") long TF_StringEncodedSize(@Cast("size_t") long len);
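// For example, encoding a single Java string into the TF_STRING format might
// look like this sketch (status assumed to be in scope):
//   byte[] src = "hello".getBytes();
//   long dstLen = TF_StringEncodedSize(src.length);
//   BytePointer dst = new BytePointer(dstLen);
//   long written = TF_StringEncode(new BytePointer(src), src.length, dst, dstLen, status);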
// --------------------------------------------------------------------------
// TF_SessionOptions holds options that can be passed during session creation.

// Return a new options object.
public static native TF_SessionOptions TF_NewSessionOptions();

// Set the target in TF_SessionOptions.options.
// target can be empty, a single entry, or a comma separated list of entries.
// Each entry is in one of the following formats:
//   "local"
//   ip:port
//   host:port
public static native void TF_SetTarget(TF_SessionOptions options, @Cast("const char*") BytePointer target);
public static native void TF_SetTarget(TF_SessionOptions options, String target);

// Set the config in TF_SessionOptions.options.
// config should be a serialized tensorflow.ConfigProto proto.
// If config was not parsed successfully as a ConfigProto, record the
// error information in *status.
public static native void TF_SetConfig(TF_SessionOptions options, @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status);

// Destroy an options object.
public static native void TF_DeleteSessionOptions(TF_SessionOptions arg0);

// TODO(jeff,sanjay):
// - export functions to set Config fields
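// For example, configuring session options from Java might look like this
// sketch:
//   TF_SessionOptions opts = TF_NewSessionOptions();
//   TF_SetTarget(opts, "local");
//   // optionally: TF_SetConfig(opts, configProtoPtr, configProtoLen, status);
//   // ... create a session with opts ...
//   TF_DeleteSessionOptions(opts);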
// --------------------------------------------------------------------------
// The new graph construction API, still under development.

// Represents a computation graph.  Graphs may be shared between sessions.
// Graphs are thread-safe when used as directed below.

// Return a new graph object.
public static native TF_Graph TF_NewGraph();

// Destroy a graph object.  The graph will be deleted once no more
// TF_Sessions are referencing it.
public static native void TF_DeleteGraph(TF_Graph arg0);

// Operation being built.  The underlying graph must outlive this.

// Operation that has been added to the graph.  Valid until the graph is
// deleted -- in particular adding a new operation to the graph does not
// invalidate old TF_Operation* pointers.

// Represents a specific input of an operation.
public static class TF_Input extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_Input() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Input(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Input(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Input position(long position) {
        return (TF_Input)super.position(position);
    }

    public native TF_Operation oper(); public native TF_Input oper(TF_Operation oper);
    public native int index(); public native TF_Input index(int index);  // The index of the input within oper.
}

// Represents a specific output of an operation.
public static class TF_Output extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_Output() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Output(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Output(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Output position(long position) {
        return (TF_Output)super.position(position);
    }

    public native TF_Operation oper(); public native TF_Output oper(TF_Operation oper);
    public native int index(); public native TF_Output index(int index);  // The index of the output within oper.
}

// TF_Function is a grouping of operations with defined inputs and outputs.
// Once created and added to graphs, functions can be invoked by creating an
// operation whose operation type matches the function name.

// Function definition options.  TODO(iga): Define and implement
@Opaque public static class TF_FunctionOptions extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public TF_FunctionOptions() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_FunctionOptions(Pointer p) { super(p); }
}

// Sets the shape of the Tensor referenced by `output` in `graph` to
// the shape described by `dims` and `num_dims`.
//
// If the number of dimensions is unknown, `num_dims` must be set to
// -1 and `dims` can be null.  If a dimension is unknown, the
// corresponding entry in the `dims` array must be -1.
//
// This does not overwrite the existing shape associated with `output`,
// but merges the input shape with the existing shape.  For example,
// setting a shape of [-1, 2] with an existing shape [2, -1] would set
// a final shape of [2, 2] based on shape merging semantics.
//
// Returns an error into `status` if:
//   * `output` is not in `graph`.
//   * An invalid shape is being set (e.g., the shape being set
//     is incompatible with the existing shape).
public static native void TF_GraphSetTensorShape(TF_Graph graph, @ByVal TF_Output output, @Cast("const int64_t*") LongPointer dims, int num_dims, TF_Status status);
public static native void TF_GraphSetTensorShape(TF_Graph graph, @ByVal TF_Output output, @Cast("const int64_t*") LongBuffer dims, int num_dims, TF_Status status);
public static native void TF_GraphSetTensorShape(TF_Graph graph, @ByVal TF_Output output, @Cast("const int64_t*") long[] dims, int num_dims, TF_Status status);

// Returns the number of dimensions of the Tensor referenced by `output`
// in `graph`.
//
// If the number of dimensions in the shape is unknown, returns -1.
//
// Returns an error into `status` if:
//   * `output` is not in `graph`.
public static native int TF_GraphGetTensorNumDims(TF_Graph graph, @ByVal TF_Output output, TF_Status status);
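// For example, merging a partially known shape into an output and reading
// back the rank from Java might look like this sketch (graph, output and
// status assumed to be in scope):
//   long[] dims = {-1, 2};
//   TF_GraphSetTensorShape(graph, output, dims, dims.length, status);
//   int numDims = TF_GraphGetTensorNumDims(graph, output, status);  // 2 here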
// Returns the shape of the Tensor referenced by `output` in `graph`
// into `dims`.  `dims` must be an array large enough to hold `num_dims`
// entries (e.g., the return value of TF_GraphGetTensorNumDims).
//
// If the number of dimensions in the shape is unknown or the shape is
// a scalar, `dims` will remain untouched.  Otherwise, each element of
// `dims` will be set corresponding to the size of the dimension.  An
// unknown dimension is represented by `-1`.
//
// Returns an error into `status` if:
//   * `output` is not in `graph`.
//   * `num_dims` does not match the actual number of dimensions.
public static native void TF_GraphGetTensorShape(TF_Graph graph, @ByVal TF_Output output, @Cast("int64_t*") LongPointer dims, int num_dims, TF_Status status);
public static native void TF_GraphGetTensorShape(TF_Graph graph, @ByVal TF_Output output, @Cast("int64_t*") LongBuffer dims, int num_dims, TF_Status status);
public static native void TF_GraphGetTensorShape(TF_Graph graph, @ByVal TF_Output output, @Cast("int64_t*") long[] dims, int num_dims, TF_Status status);

// Operation will only be added to *graph when TF_FinishOperation() is
// called (assuming TF_FinishOperation() does not return an error).
// *graph must not be deleted until after TF_FinishOperation() is
// called.
public static native TF_OperationDescription TF_NewOperation(TF_Graph graph, @Cast("const char*") BytePointer op_type, @Cast("const char*") BytePointer oper_name);
public static native TF_OperationDescription TF_NewOperation(TF_Graph graph, String op_type, String oper_name);

// Specify the device for `desc`.  Defaults to empty, meaning unconstrained.
public static native void TF_SetDevice(TF_OperationDescription desc, @Cast("const char*") BytePointer device);
public static native void TF_SetDevice(TF_OperationDescription desc, String device);

// The calls to TF_AddInput and TF_AddInputList must match (in number,
// order, and type) the op declaration.  For example, the "Concat" op
// has registration:
//   REGISTER_OP("Concat")
//       .Input("concat_dim: int32")
//       .Input("values: N * T")
//       .Output("output: T")
//       .Attr("N: int >= 2")
//       .Attr("T: type");
// that defines two inputs, "concat_dim" and "values" (in that order).
// You must use TF_AddInput() for the first input (since it takes a
// single tensor), and TF_AddInputList() for the second input (since
// it takes a list, even if you were to pass a list with a single
// tensor), as in:
//   TF_OperationDescription* desc = TF_NewOperation(graph, "Concat", "c");
//   TF_Output concat_dim_input = {...};
//   TF_AddInput(desc, concat_dim_input);
//   TF_Output values_inputs[5] = {{...}, ..., {...}};
//   TF_AddInputList(desc, values_inputs, 5);

// For inputs that take a single tensor.
public static native void TF_AddInput(TF_OperationDescription desc, @ByVal TF_Output input);

// For inputs that take a list of tensors.
// inputs must point to TF_Output[num_inputs].
public static native void TF_AddInputList(TF_OperationDescription desc, @Const TF_Output inputs, int num_inputs);

// Call once per control input to `desc`.
public static native void TF_AddControlInput(TF_OperationDescription desc, TF_Operation input);

// Request that `desc` be co-located on the device where `op`
// is placed.
//
// Use of this is discouraged since the implementation of device placement is
// subject to change.  Primarily intended for internal libraries.
public static native void TF_ColocateWith(TF_OperationDescription desc, TF_Operation op);
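// The Concat sketch above, expressed with these Java bindings, might look
// like this (graph and the concatDimInput value are assumed to be in scope;
// the native array is indexed via Pointer.position):
//   TF_OperationDescription desc = TF_NewOperation(graph, "Concat", "c");
//   TF_AddInput(desc, concatDimInput);
//   TF_Output valuesInputs = new TF_Output(5);  // native array of 5 TF_Output
//   // ... fill valuesInputs.position(i) for i in [0, 5) ...
//   TF_AddInputList(desc, valuesInputs.position(0), 5);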
// Call some TF_SetAttr*() function for every attr that is not
// inferred from an input and doesn't have a default value you wish to
// keep.

// `value` must point to a string of length `length` bytes.
public static native void TF_SetAttrString(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Const Pointer value, @Cast("size_t") long length);
public static native void TF_SetAttrString(TF_OperationDescription desc, String attr_name, @Const Pointer value, @Cast("size_t") long length);

// `values` and `lengths` each must have lengths `num_values`.
// `values[i]` must point to a string of length `lengths[i]` bytes.
public static native void TF_SetAttrStringList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const void*const*") PointerPointer values, @Cast("const size_t*") SizeTPointer lengths, int num_values);
public static native void TF_SetAttrStringList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const void*const*") @ByPtrPtr Pointer values, @Cast("const size_t*") SizeTPointer lengths, int num_values);
public static native void TF_SetAttrStringList(TF_OperationDescription desc, String attr_name, @Cast("const void*const*") @ByPtrPtr Pointer values, @Cast("const size_t*") SizeTPointer lengths, int num_values);

public static native void TF_SetAttrInt(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("int64_t") long value);
public static native void TF_SetAttrInt(TF_OperationDescription desc, String attr_name, @Cast("int64_t") long value);

public static native void TF_SetAttrIntList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*") LongPointer values, int num_values);
public static native void TF_SetAttrIntList(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*") LongBuffer values, int num_values);
public static native void TF_SetAttrIntList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*") long[] values, int num_values);
public static native void TF_SetAttrIntList(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*") LongPointer values, int num_values);
public static native void TF_SetAttrIntList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*") LongBuffer values, int num_values);
public static native void TF_SetAttrIntList(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*") long[] values, int num_values);

public static native void TF_SetAttrFloat(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, float value);
public static native void TF_SetAttrFloat(TF_OperationDescription desc, String attr_name, float value);

public static native void TF_SetAttrFloatList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Const FloatPointer values, int num_values);
public static native void TF_SetAttrFloatList(TF_OperationDescription desc, String attr_name, @Const FloatBuffer values, int num_values);
public static native void TF_SetAttrFloatList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Const float[] values, int num_values);
public static native void TF_SetAttrFloatList(TF_OperationDescription desc, String attr_name, @Const FloatPointer values, int num_values);
public static native void TF_SetAttrFloatList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Const FloatBuffer values, int num_values);
public static native void TF_SetAttrFloatList(TF_OperationDescription desc, String attr_name, @Const float[] values, int num_values);

public static native void TF_SetAttrBool(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char") byte value);
public static native void TF_SetAttrBool(TF_OperationDescription desc, String attr_name, @Cast("unsigned char") byte value);

public static native void TF_SetAttrBoolList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const unsigned char*") BytePointer values, int num_values);
public static native void TF_SetAttrBoolList(TF_OperationDescription desc, String attr_name, @Cast("const unsigned char*") ByteBuffer values, int num_values);
public static native void TF_SetAttrBoolList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const unsigned char*") byte[] values, int num_values);
public static native void TF_SetAttrBoolList(TF_OperationDescription desc, String attr_name, @Cast("const unsigned char*") BytePointer values, int num_values);
public static native void TF_SetAttrBoolList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const unsigned char*") ByteBuffer values, int num_values);
public static native void TF_SetAttrBoolList(TF_OperationDescription desc, String attr_name, @Cast("const unsigned char*") byte[] values, int num_values);

public static native void TF_SetAttrType(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType") int value);
public static native void TF_SetAttrType(TF_OperationDescription desc, String attr_name, @Cast("TF_DataType") int value);

public static native void TF_SetAttrTypeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const TF_DataType*") IntPointer values, int num_values);
public static native void TF_SetAttrTypeList(TF_OperationDescription desc, String attr_name, @Cast("const TF_DataType*") IntBuffer values, int num_values);
public static native void TF_SetAttrTypeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const TF_DataType*") int[] values, int num_values);
public static native void TF_SetAttrTypeList(TF_OperationDescription desc, String attr_name, @Cast("const TF_DataType*") IntPointer values, int num_values);
public static native void TF_SetAttrTypeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const TF_DataType*") IntBuffer values, int num_values);
public static native void TF_SetAttrTypeList(TF_OperationDescription desc, String attr_name, @Cast("const TF_DataType*") int[] values, int num_values);

// Set a 'func' attribute to the specified name.
// `value` must point to a string of length `length` bytes.
public static native void TF_SetAttrFuncName(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const char*") BytePointer value, @Cast("size_t") long length);
public static native void TF_SetAttrFuncName(TF_OperationDescription desc, String attr_name, String value, @Cast("size_t") long length);
// Set `num_dims` to -1 to represent "unknown rank".  Otherwise,
// `dims` points to an array of length `num_dims`.  `dims[i]` must be
// >= -1, with -1 meaning "unknown dimension".
public static native void TF_SetAttrShape(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*") LongPointer dims, int num_dims);
public static native void TF_SetAttrShape(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*") LongBuffer dims, int num_dims);
public static native void TF_SetAttrShape(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*") long[] dims, int num_dims);
public static native void TF_SetAttrShape(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*") LongPointer dims, int num_dims);
public static native void TF_SetAttrShape(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*") LongBuffer dims, int num_dims);
public static native void TF_SetAttrShape(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*") long[] dims, int num_dims);

// `dims` and `num_dims` must point to arrays of length `num_shapes`.
// Set `num_dims[i]` to -1 to represent "unknown rank".  Otherwise,
// `dims[i]` points to an array of length `num_dims[i]`.  `dims[i][j]`
// must be >= -1, with -1 meaning "unknown dimension".
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*const*") PointerPointer dims, @Const IntPointer num_dims, int num_shapes);
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*const*") @ByPtrPtr LongPointer dims, @Const IntPointer num_dims, int num_shapes);
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*const*") @ByPtrPtr LongBuffer dims, @Const IntBuffer num_dims, int num_shapes);
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*const*") @ByPtrPtr long[] dims, @Const int[] num_dims, int num_shapes);
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*const*") @ByPtrPtr LongPointer dims, @Const IntPointer num_dims, int num_shapes);
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const int64_t*const*") @ByPtrPtr LongBuffer dims, @Const IntBuffer num_dims, int num_shapes);
public static native void TF_SetAttrShapeList(TF_OperationDescription desc, String attr_name, @Cast("const int64_t*const*") @ByPtrPtr long[] dims, @Const int[] num_dims, int num_shapes);

// `proto` must point to an array of `proto_len` bytes representing a
// binary-serialized TensorShapeProto.
public static native void TF_SetAttrTensorShapeProto(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status);
public static native void TF_SetAttrTensorShapeProto(TF_OperationDescription desc, String attr_name, @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status);
// `protos` and `proto_lens` must point to arrays of length `num_shapes`.
// `protos[i]` must point to an array of `proto_lens[i]` bytes
// representing a binary-serialized TensorShapeProto.
public static native void TF_SetAttrTensorShapeProtoList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const void*const*") PointerPointer protos, @Cast("const size_t*") SizeTPointer proto_lens, int num_shapes, TF_Status status);
public static native void TF_SetAttrTensorShapeProtoList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("const void*const*") @ByPtrPtr Pointer protos, @Cast("const size_t*") SizeTPointer proto_lens, int num_shapes, TF_Status status);
public static native void TF_SetAttrTensorShapeProtoList(TF_OperationDescription desc, String attr_name, @Cast("const void*const*") @ByPtrPtr Pointer protos, @Cast("const size_t*") SizeTPointer proto_lens, int num_shapes, TF_Status status);

public static native void TF_SetAttrTensor(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, TF_Tensor value, TF_Status status);
public static native void TF_SetAttrTensor(TF_OperationDescription desc, String attr_name, TF_Tensor value, TF_Status status);

public static native void TF_SetAttrTensorList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Cast("TF_Tensor*const*") PointerPointer values, int num_values, TF_Status status);
public static native void TF_SetAttrTensorList(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @ByPtrPtr TF_Tensor values, int num_values, TF_Status status);
public static native void TF_SetAttrTensorList(TF_OperationDescription desc, String attr_name, @ByPtrPtr TF_Tensor values, int num_values, TF_Status status);

// `proto` should point to a sequence of bytes of length `proto_len`
// representing a binary serialization of an AttrValue protocol
// buffer.
public static native void TF_SetAttrValueProto(TF_OperationDescription desc, @Cast("const char*") BytePointer attr_name, @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status);
public static native void TF_SetAttrValueProto(TF_OperationDescription desc, String attr_name, @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status);

// If this function succeeds:
//   * *status is set to an OK value,
//   * a TF_Operation is added to the graph,
//   * a non-null value pointing to the added operation is returned --
//     this value is valid until the underlying graph is deleted.
// Otherwise:
//   * *status is set to a non-OK value,
//   * the graph is not modified,
//   * a null value is returned.
// In either case, it deletes `desc`.
public static native TF_Operation TF_FinishOperation(TF_OperationDescription desc, TF_Status status);
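// Putting the pieces together, creating a Const operation from Java might
// look like this sketch (graph, tensor and status assumed to be in scope):
//   TF_OperationDescription desc = TF_NewOperation(graph, "Const", "my_const");
//   TF_SetAttrType(desc, "dtype", TF_FLOAT);
//   TF_SetAttrTensor(desc, "value", tensor, status);
//   TF_Operation op = TF_FinishOperation(desc, status);
//   if (TF_GetCode(status) != TF_OK) { /* desc has already been deleted */ }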
// TF_Operation functions.  Operations are immutable once created, so
// these are all query functions.

public static native @Cast("const char*") BytePointer TF_OperationName(TF_Operation oper);
public static native @Cast("const char*") BytePointer TF_OperationOpType(TF_Operation oper);
public static native @Cast("const char*") BytePointer TF_OperationDevice(TF_Operation oper);

public static native int TF_OperationNumOutputs(TF_Operation oper);
public static native @Cast("TF_DataType") int TF_OperationOutputType(@ByVal TF_Output oper_out);
public static native int TF_OperationOutputListLength(TF_Operation oper, @Cast("const char*") BytePointer arg_name, TF_Status status);
public static native int TF_OperationOutputListLength(TF_Operation oper, String arg_name, TF_Status status);

public static native int TF_OperationNumInputs(TF_Operation oper);
public static native @Cast("TF_DataType") int TF_OperationInputType(@ByVal TF_Input oper_in);
public static native int TF_OperationInputListLength(TF_Operation oper, @Cast("const char*") BytePointer arg_name, TF_Status status);
public static native int TF_OperationInputListLength(TF_Operation oper, String arg_name, TF_Status status);

// In this code:
//   TF_Output producer = TF_OperationInput(consumer);
// There is an edge from producer.oper's output (given by
// producer.index) to consumer.oper's input (given by consumer.index).
public static native @ByVal TF_Output TF_OperationInput(@ByVal TF_Input oper_in);

// Get the number of current consumers of a specific output of an
// operation.  Note that this number can change when new operations
// are added to the graph.
public static native int TF_OperationOutputNumConsumers(@ByVal TF_Output oper_out);

// Get list of all current consumers of a specific output of an
// operation.  `consumers` must point to an array of length at least
// `max_consumers` (ideally set to
// TF_OperationOutputNumConsumers(oper_out)).  Beware that a concurrent
// modification of the graph can increase the number of consumers of
// an operation.  Returns the number of output consumers (should match
// TF_OperationOutputNumConsumers(oper_out)).
public static native int TF_OperationOutputConsumers(@ByVal TF_Output oper_out, TF_Input consumers, int max_consumers);

// Get the number of control inputs to an operation.
public static native int TF_OperationNumControlInputs(TF_Operation oper);

// Get list of all control inputs to an operation.  `control_inputs` must
// point to an array of length `max_control_inputs` (ideally set to
// TF_OperationNumControlInputs(oper)).  Returns the number of control
// inputs (should match TF_OperationNumControlInputs(oper)).
public static native int TF_OperationGetControlInputs(TF_Operation oper, @Cast("TF_Operation**") PointerPointer control_inputs, int max_control_inputs);
public static native int TF_OperationGetControlInputs(TF_Operation oper, @ByPtrPtr TF_Operation control_inputs, int max_control_inputs);

// Get the number of operations that have `*oper` as a control input.
// Note that this number can change when new operations are added to
// the graph.
public static native int TF_OperationNumControlOutputs(TF_Operation oper);
// Get the list of operations that have `*oper` as a control input.
// `control_outputs` must point to an array of length at least
// `max_control_outputs` (ideally set to
// TF_OperationNumControlOutputs(oper)).  Beware that a concurrent
// modification of the graph can increase the number of control
// outputs.  Returns the number of control outputs (should match
// TF_OperationNumControlOutputs(oper)).
public static native int TF_OperationGetControlOutputs(TF_Operation oper, @Cast("TF_Operation**") PointerPointer control_outputs, int max_control_outputs);
public static native int TF_OperationGetControlOutputs(TF_Operation oper, @ByPtrPtr TF_Operation control_outputs, int max_control_outputs);

// TF_AttrType describes the type of the value of an attribute on an operation.
/** enum TF_AttrType */
public static final int
  TF_ATTR_STRING = 0,
  TF_ATTR_INT = 1,
  TF_ATTR_FLOAT = 2,
  TF_ATTR_BOOL = 3,
  TF_ATTR_TYPE = 4,
  TF_ATTR_SHAPE = 5,
  TF_ATTR_TENSOR = 6,
  TF_ATTR_PLACEHOLDER = 7,
  TF_ATTR_FUNC = 8;

// TF_AttrMetadata describes the value of an attribute on an operation.
public static class TF_AttrMetadata extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_AttrMetadata() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_AttrMetadata(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_AttrMetadata(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_AttrMetadata position(long position) {
        return (TF_AttrMetadata)super.position(position);
    }

    // A boolean: 1 if the attribute value is a list, 0 otherwise.
    public native @Cast("unsigned char") byte is_list(); public native TF_AttrMetadata is_list(byte is_list);

    // Length of the list if is_list is true.  Undefined otherwise.
    public native @Cast("int64_t") long list_size(); public native TF_AttrMetadata list_size(long list_size);

    // Type of elements of the list if is_list != 0.
    // Type of the single value stored in the attribute if is_list == 0.
    public native @Cast("TF_AttrType") int type(); public native TF_AttrMetadata type(int type);

    // Total size of the attribute value.
    // The units of total_size depend on is_list and type.
    // (1) If type == TF_ATTR_STRING and is_list == 0
    //     then total_size is the byte size of the string
    //     valued attribute.
    // (2) If type == TF_ATTR_STRING and is_list == 1
    //     then total_size is the cumulative byte size
    //     of all the strings in the list.
    // (3) If type == TF_ATTR_SHAPE and is_list == 0
    //     then total_size is the number of dimensions
    //     of the shape valued attribute, or -1
    //     if its rank is unknown.
    // (4) If type == TF_ATTR_SHAPE and is_list == 1
    //     then total_size is the cumulative number
    //     of dimensions of all shapes in the list.
    // (5) Otherwise, total_size is undefined.
    public native @Cast("int64_t") long total_size(); public native TF_AttrMetadata total_size(long total_size);
}

// Returns metadata about the value of the attribute `attr_name` of `oper`.
public static native @ByVal TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation oper, @Cast("const char*") BytePointer attr_name, TF_Status status);
public static native @ByVal TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation oper, String attr_name, TF_Status status);
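// For example, sizing a buffer for a string-valued attribute from Java might
// look like this sketch (oper and status assumed to be in scope; "value" is a
// hypothetical attr name):
//   TF_AttrMetadata meta = TF_OperationGetAttrMetadata(oper, "value", status);
//   if (meta.is_list() == 0 && meta.type() == TF_ATTR_STRING) {
//     BytePointer buf = new BytePointer(meta.total_size());
//     TF_OperationGetAttrString(oper, "value", buf, meta.total_size(), status);
//   }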
// Fills in `value` with the value of the attribute `attr_name`.  `value` must
// point to an array of length at least `max_length` (ideally set to
// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
// attr_name)).
public static native void TF_OperationGetAttrString(TF_Operation oper, @Cast("const char*") BytePointer attr_name, Pointer value, @Cast("size_t") long max_length, TF_Status status);
public static native void TF_OperationGetAttrString(TF_Operation oper, String attr_name, Pointer value, @Cast("size_t") long max_length, TF_Status status);

// Get the list of strings in the value of the attribute `attr_name`.  Fills in
// `values` and `lengths`, each of which must point to an array of length at
// least `max_values`.
//
// The elements of values will point to addresses in `storage` which must be at
// least `storage_size` bytes in length.  Ideally, max_values would be set to
// TF_AttrMetadata.list_size and `storage` would be at least
// TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper,
// attr_name).
//
// Fails if storage_size is too small to hold the requested number of strings.
public static native void TF_OperationGetAttrStringList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("void**") PointerPointer values, @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, TF_Status status);
public static native void TF_OperationGetAttrStringList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("void**") @ByPtrPtr Pointer values, @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, TF_Status status);
public static native void TF_OperationGetAttrStringList(TF_Operation oper, String attr_name, @Cast("void**") @ByPtrPtr Pointer values, @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, TF_Status status);

public static native void TF_OperationGetAttrInt(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongPointer value, TF_Status status);
public static native void TF_OperationGetAttrInt(TF_Operation oper, String attr_name, @Cast("int64_t*") LongBuffer value, TF_Status status);
public static native void TF_OperationGetAttrInt(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") long[] value, TF_Status status);
public static native void TF_OperationGetAttrInt(TF_Operation oper, String attr_name, @Cast("int64_t*") LongPointer value, TF_Status status);
public static native void TF_OperationGetAttrInt(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongBuffer value, TF_Status status);
public static native void TF_OperationGetAttrInt(TF_Operation oper, String attr_name, @Cast("int64_t*") long[] value, TF_Status status);
// Fills in `values` with the value of the attribute `attr_name` of `oper`.
// `values` must point to an array of length at least `max_values` (ideally set
// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
// attr_name)).
public static native void TF_OperationGetAttrIntList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrIntList(TF_Operation oper, String attr_name, @Cast("int64_t*") LongBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrIntList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") long[] values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrIntList(TF_Operation oper, String attr_name, @Cast("int64_t*") LongPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrIntList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrIntList(TF_Operation oper, String attr_name, @Cast("int64_t*") long[] values, int max_values, TF_Status status);

public static native void TF_OperationGetAttrFloat(TF_Operation oper, @Cast("const char*") BytePointer attr_name, FloatPointer value, TF_Status status);
public static native void TF_OperationGetAttrFloat(TF_Operation oper, String attr_name, FloatBuffer value, TF_Status status);
public static native void TF_OperationGetAttrFloat(TF_Operation oper, @Cast("const char*") BytePointer attr_name, float[] value, TF_Status status);
public static native void TF_OperationGetAttrFloat(TF_Operation oper, String attr_name, FloatPointer value, TF_Status status);
public static native void TF_OperationGetAttrFloat(TF_Operation oper, @Cast("const char*") BytePointer attr_name, FloatBuffer value, TF_Status status);
public static native void TF_OperationGetAttrFloat(TF_Operation oper, String attr_name, float[] value, TF_Status status);
// Fills in `values` with the value of the attribute `attr_name` of `oper`.
// `values` must point to an array of length at least `max_values` (ideally set
// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
// attr_name)).
public static native void TF_OperationGetAttrFloatList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, FloatPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrFloatList(TF_Operation oper, String attr_name, FloatBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrFloatList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, float[] values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrFloatList(TF_Operation oper, String attr_name, FloatPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrFloatList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, FloatBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrFloatList(TF_Operation oper, String attr_name, float[] values, int max_values, TF_Status status);

public static native void TF_OperationGetAttrBool(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") BytePointer value, TF_Status status);
public static native void TF_OperationGetAttrBool(TF_Operation oper, String attr_name, @Cast("unsigned char*") ByteBuffer value, TF_Status status);
public static native void TF_OperationGetAttrBool(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") byte[] value, TF_Status status);
public static native void TF_OperationGetAttrBool(TF_Operation oper, String attr_name, @Cast("unsigned char*") BytePointer value, TF_Status status);
public static native void TF_OperationGetAttrBool(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") ByteBuffer value, TF_Status status);
public static native void TF_OperationGetAttrBool(TF_Operation oper, String attr_name, @Cast("unsigned char*") byte[] value, TF_Status status);
// Fills in `values` with the value of the attribute `attr_name` of `oper`.
// `values` must point to an array of length at least `max_values` (ideally set
// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
// attr_name)).
public static native void TF_OperationGetAttrBoolList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") BytePointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrBoolList(TF_Operation oper, String attr_name, @Cast("unsigned char*") ByteBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrBoolList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") byte[] values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrBoolList(TF_Operation oper, String attr_name, @Cast("unsigned char*") BytePointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrBoolList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("unsigned char*") ByteBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrBoolList(TF_Operation oper, String attr_name, @Cast("unsigned char*") byte[] values, int max_values, TF_Status status);

public static native void TF_OperationGetAttrType(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") IntPointer value, TF_Status status);
public static native void TF_OperationGetAttrType(TF_Operation oper, String attr_name, @Cast("TF_DataType*") IntBuffer value, TF_Status status);
public static native void TF_OperationGetAttrType(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") int[] value, TF_Status status);
public static native void TF_OperationGetAttrType(TF_Operation oper, String attr_name, @Cast("TF_DataType*") IntPointer value, TF_Status status);
public static native void TF_OperationGetAttrType(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") IntBuffer value, TF_Status status);
public static native void TF_OperationGetAttrType(TF_Operation oper, String attr_name, @Cast("TF_DataType*") int[] value, TF_Status status);

// Fills in `values` with the value of the attribute `attr_name` of `oper`.
// `values` must point to an array of length at least `max_values` (ideally set
// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper,
// attr_name)).
public static native void TF_OperationGetAttrTypeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") IntPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTypeList(TF_Operation oper, String attr_name, @Cast("TF_DataType*") IntBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTypeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") int[] values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTypeList(TF_Operation oper, String attr_name, @Cast("TF_DataType*") IntPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTypeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_DataType*") IntBuffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTypeList(TF_Operation oper, String attr_name, @Cast("TF_DataType*") int[] values, int max_values, TF_Status status);
// Fills in `value` with the value of the attribute `attr_name` of `oper`.
// `value` must point to an array of length at least `num_dims` (ideally set to
// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, attr_name)).
public static native void TF_OperationGetAttrShape(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongPointer value, int num_dims, TF_Status status);
public static native void TF_OperationGetAttrShape(TF_Operation oper, String attr_name, @Cast("int64_t*") LongBuffer value, int num_dims, TF_Status status);
public static native void TF_OperationGetAttrShape(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") long[] value, int num_dims, TF_Status status);
public static native void TF_OperationGetAttrShape(TF_Operation oper, String attr_name, @Cast("int64_t*") LongPointer value, int num_dims, TF_Status status);
public static native void TF_OperationGetAttrShape(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t*") LongBuffer value, int num_dims, TF_Status status);
public static native void TF_OperationGetAttrShape(TF_Operation oper, String attr_name, @Cast("int64_t*") long[] value, int num_dims, TF_Status status);

// Fills in `dims` with the list of shapes in the attribute `attr_name` of
// `oper` and `num_dims` with the corresponding number of dimensions.  On
// return, for every i where `num_dims[i]` > 0, `dims[i]` will be an array of
// `num_dims[i]` elements.  A value of -1 for `num_dims[i]` indicates that the
// i-th shape in the list is unknown.
//
// The elements of `dims` will point to addresses in `storage` which must be
// large enough to hold at least `storage_size` int64_ts.  Ideally, `num_shapes`
// would be set to TF_AttrMetadata.list_size and `storage_size` would be set to
// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper,
// attr_name).
//
// Fails if storage_size is insufficient to hold the requested shapes.
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t**") PointerPointer dims, IntPointer num_dims, int num_shapes, @Cast("int64_t*") LongPointer storage, int storage_size, TF_Status status);
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t**") @ByPtrPtr LongPointer dims, IntPointer num_dims, int num_shapes, @Cast("int64_t*") LongPointer storage, int storage_size, TF_Status status);
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, String attr_name, @Cast("int64_t**") @ByPtrPtr LongBuffer dims, IntBuffer num_dims, int num_shapes, @Cast("int64_t*") LongBuffer storage, int storage_size, TF_Status status);
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t**") @ByPtrPtr long[] dims, int[] num_dims, int num_shapes, @Cast("int64_t*") long[] storage, int storage_size, TF_Status status);
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, String attr_name, @Cast("int64_t**") @ByPtrPtr LongPointer dims, IntPointer num_dims, int num_shapes, @Cast("int64_t*") LongPointer storage, int storage_size, TF_Status status);
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("int64_t**") @ByPtrPtr LongBuffer dims, IntBuffer num_dims, int num_shapes, @Cast("int64_t*") LongBuffer storage, int storage_size, TF_Status status);
public static native void TF_OperationGetAttrShapeList(TF_Operation oper, String attr_name, @Cast("int64_t**") @ByPtrPtr long[] dims, int[] num_dims, int num_shapes, @Cast("int64_t*") long[] storage, int storage_size, TF_Status status);

// Sets `value` to the binary-serialized TensorShapeProto of the value of the
// `attr_name` attribute of `oper`.
public static native void TF_OperationGetAttrTensorShapeProto(TF_Operation oper, @Cast("const char*") BytePointer attr_name, TF_Buffer value, TF_Status status);
public static native void TF_OperationGetAttrTensorShapeProto(TF_Operation oper, String attr_name, TF_Buffer value, TF_Status status);

// Fills in `values` with binary-serialized TensorShapeProto values of the
// attribute `attr_name` of `oper`.  `values` must point to an array of length
// at least `num_values` (ideally set to TF_AttrMetadata.list_size from
// TF_OperationGetAttrMetadata(oper, attr_name)).
public static native void TF_OperationGetAttrTensorShapeProtoList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_Buffer**") PointerPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTensorShapeProtoList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @ByPtrPtr TF_Buffer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTensorShapeProtoList(TF_Operation oper, String attr_name, @ByPtrPtr TF_Buffer values, int max_values, TF_Status status);
// Gets the TF_Tensor valued attribute of `attr_name` of `oper`.
//
// Allocates a new TF_Tensor which the caller is expected to take
// ownership of (and can deallocate using TF_DeleteTensor).
public static native void TF_OperationGetAttrTensor(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_Tensor**") PointerPointer value, TF_Status status);
public static native void TF_OperationGetAttrTensor(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @ByPtrPtr TF_Tensor value, TF_Status status);
public static native void TF_OperationGetAttrTensor(TF_Operation oper, String attr_name, @ByPtrPtr TF_Tensor value, TF_Status status);

// Fills in `values` with the TF_Tensor values of the attribute `attr_name` of
// `oper`.  `values` must point to an array of TF_Tensor* of length at least
// `max_values` (ideally set to TF_AttrMetadata.list_size from
// TF_OperationGetAttrMetadata(oper, attr_name)).
//
// The caller takes ownership of all the non-null TF_Tensor* entries in `values`
// (which can be deleted using TF_DeleteTensor(values[i])).
public static native void TF_OperationGetAttrTensorList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @Cast("TF_Tensor**") PointerPointer values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTensorList(TF_Operation oper, @Cast("const char*") BytePointer attr_name, @ByPtrPtr TF_Tensor values, int max_values, TF_Status status);
public static native void TF_OperationGetAttrTensorList(TF_Operation oper, String attr_name, @ByPtrPtr TF_Tensor values, int max_values, TF_Status status);

// Sets `output_attr_value` to the binary-serialized AttrValue proto
// representation of the value of the `attr_name` attr of `oper`.
public static native void TF_OperationGetAttrValueProto(TF_Operation oper, @Cast("const char*") BytePointer attr_name, TF_Buffer output_attr_value, TF_Status status);
public static native void TF_OperationGetAttrValueProto(TF_Operation oper, String attr_name, TF_Buffer output_attr_value, TF_Status status);

// Returns the operation in the graph with `oper_name`.  Returns nullptr if
// no operation is found.
public static native TF_Operation TF_GraphOperationByName(TF_Graph graph, @Cast("const char*") BytePointer oper_name);
public static native TF_Operation TF_GraphOperationByName(TF_Graph graph, String oper_name);

// Iterate through the operations of a graph.  To use:
//   size_t pos = 0;
//   TF_Operation* oper;
//   while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) {
//     DoSomethingWithOperation(oper);
//   }
public static native TF_Operation TF_GraphNextOperation(TF_Graph graph, @Cast("size_t*") SizeTPointer pos);
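// The same iteration expressed from Java might look like this sketch:
//   SizeTPointer pos = new SizeTPointer(1).put(0);
//   TF_Operation oper;
//   while ((oper = TF_GraphNextOperation(graph, pos)) != null) {
//     System.out.println(TF_OperationName(oper).getString());
//   }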
// Returns the serialized VersionDef proto for this graph.
public static native void TF_GraphVersions(TF_Graph graph,
    TF_Buffer output_version_def, TF_Status status);

// TF_ImportGraphDefOptions holds options that can be passed to
// TF_GraphImportGraphDef.
public static native TF_ImportGraphDefOptions TF_NewImportGraphDefOptions();
public static native void TF_DeleteImportGraphDefOptions(
    TF_ImportGraphDefOptions opts);

// Set the prefix to be prepended to the names of nodes in `graph_def` that will
// be imported into `graph`. `prefix` is copied and has no lifetime
// requirements.
public static native void TF_ImportGraphDefOptionsSetPrefix(
    TF_ImportGraphDefOptions opts, @Cast("const char*") BytePointer prefix);
public static native void TF_ImportGraphDefOptionsSetPrefix(
    TF_ImportGraphDefOptions opts, String prefix);

// Set whether to uniquify imported operation names. If true, imported operation
// names will be modified if their name already exists in the graph. If false,
// conflicting names will be treated as an error. Note that this option has no
// effect if a prefix is set, since the prefix will guarantee all names are
// unique. Defaults to false.
public static native void TF_ImportGraphDefOptionsSetUniquifyNames(
    TF_ImportGraphDefOptions opts, @Cast("unsigned char") byte uniquify_names);

// If true, the specified prefix will be modified if it already exists as an
// operation name or prefix in the graph. If false, a conflicting prefix will be
// treated as an error. This option has no effect if no prefix is specified.
public static native void TF_ImportGraphDefOptionsSetUniquifyPrefix(
    TF_ImportGraphDefOptions opts, @Cast("unsigned char") byte uniquify_prefix);

// Set any imported nodes with input `src_name:src_index` to have that input
// replaced with `dst`. `src_name` refers to a node in the graph to be imported,
// `dst` references a node already existing in the graph being imported into.
// `src_name` is copied and has no lifetime requirements.
public static native void TF_ImportGraphDefOptionsAddInputMapping(
    TF_ImportGraphDefOptions opts, @Cast("const char*") BytePointer src_name,
    int src_index, @ByVal TF_Output dst);
public static native void TF_ImportGraphDefOptionsAddInputMapping(
    TF_ImportGraphDefOptions opts, String src_name, int src_index,
    @ByVal TF_Output dst);

// Set any imported nodes with control input `src_name` to have that input
// replaced with `dst`. `src_name` refers to a node in the graph to be imported,
// `dst` references an operation already existing in the graph being imported
// into. `src_name` is copied and has no lifetime requirements.
public static native void TF_ImportGraphDefOptionsRemapControlDependency(
    TF_ImportGraphDefOptions opts, @Cast("const char*") BytePointer src_name,
    TF_Operation dst);
public static native void TF_ImportGraphDefOptionsRemapControlDependency(
    TF_ImportGraphDefOptions opts, String src_name, TF_Operation dst);

// Cause the imported graph to have a control dependency on `oper`. `oper`
// should exist in the graph being imported into.
public static native void TF_ImportGraphDefOptionsAddControlDependency(
    TF_ImportGraphDefOptions opts, TF_Operation oper);
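// A minimal configuration sketch (not part of the generated declarations): build
// import options that prefix imported node names, uniquify them on collision, and
// remap one imported input to an existing operation. `existingOp` is a hypothetical
// operation assumed to already be in the destination graph, and "input" is a
// hypothetical node name inside the GraphDef being imported.
public static void exampleConfigureImportOptions(TF_Operation existingOp) {
    TF_ImportGraphDefOptions opts = TF_NewImportGraphDefOptions();
    TF_ImportGraphDefOptionsSetPrefix(opts, "imported");
    TF_ImportGraphDefOptionsSetUniquifyNames(opts, (byte)1);
    TF_Output dst = new TF_Output();
    dst.oper(existingOp).index(0);
    TF_ImportGraphDefOptionsAddInputMapping(opts, "input", 0, dst);
    // ... pass `opts` to TF_GraphImportGraphDef(), then:
    TF_DeleteImportGraphDefOptions(opts);
}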
// Add an output in `graph_def` to be returned via the `return_outputs` output
// parameter of TF_GraphImportGraphDef(). If the output is remapped via an input
// mapping, the corresponding existing tensor in `graph` will be returned.
// `oper_name` is copied and has no lifetime requirements.
public static native void TF_ImportGraphDefOptionsAddReturnOutput(
    TF_ImportGraphDefOptions opts, @Cast("const char*") BytePointer oper_name,
    int index);
public static native void TF_ImportGraphDefOptionsAddReturnOutput(
    TF_ImportGraphDefOptions opts, String oper_name, int index);

// Returns the number of return outputs added via
// TF_ImportGraphDefOptionsAddReturnOutput().
public static native int TF_ImportGraphDefOptionsNumReturnOutputs(
    @Const TF_ImportGraphDefOptions opts);

// Add an operation in `graph_def` to be returned via the `return_opers` output
// parameter of TF_GraphImportGraphDef(). `oper_name` is copied and has no
// lifetime requirements.
public static native void TF_ImportGraphDefOptionsAddReturnOperation(
    TF_ImportGraphDefOptions opts, @Cast("const char*") BytePointer oper_name);
public static native void TF_ImportGraphDefOptionsAddReturnOperation(
    TF_ImportGraphDefOptions opts, String oper_name);

// Returns the number of return operations added via
// TF_ImportGraphDefOptionsAddReturnOperation().
public static native int TF_ImportGraphDefOptionsNumReturnOperations(
    @Const TF_ImportGraphDefOptions opts);

// TF_ImportGraphDefResults holds results that are generated by
// TF_GraphImportGraphDefWithResults().

// Fetches the return outputs requested via
// TF_ImportGraphDefOptionsAddReturnOutput(). The number of fetched outputs is
// returned in `num_outputs`. The array of return outputs is returned in
// `outputs`. `*outputs` is owned by and has the lifetime of `results`.
public static native void TF_ImportGraphDefResultsReturnOutputs(
    TF_ImportGraphDefResults results, IntPointer num_outputs,
    @Cast("TF_Output**") PointerPointer outputs);
public static native void TF_ImportGraphDefResultsReturnOutputs(
    TF_ImportGraphDefResults results, IntPointer num_outputs,
    @ByPtrPtr TF_Output outputs);
public static native void TF_ImportGraphDefResultsReturnOutputs(
    TF_ImportGraphDefResults results, IntBuffer num_outputs,
    @ByPtrPtr TF_Output outputs);
public static native void TF_ImportGraphDefResultsReturnOutputs(
    TF_ImportGraphDefResults results, int[] num_outputs,
    @ByPtrPtr TF_Output outputs);

// Fetches the return operations requested via
// TF_ImportGraphDefOptionsAddReturnOperation(). The number of fetched
// operations is returned in `num_opers`. The array of return operations is
// returned in `opers`. `*opers` is owned by and has the lifetime of `results`.
public static native void TF_ImportGraphDefResultsReturnOperations(
    TF_ImportGraphDefResults results, IntPointer num_opers,
    @Cast("TF_Operation***") @ByPtrPtr PointerPointer opers);
public static native void TF_ImportGraphDefResultsReturnOperations(
    TF_ImportGraphDefResults results, IntBuffer num_opers,
    @Cast("TF_Operation***") @ByPtrPtr PointerPointer opers);
public static native void TF_ImportGraphDefResultsReturnOperations(
    TF_ImportGraphDefResults results, int[] num_opers,
    @Cast("TF_Operation***") @ByPtrPtr PointerPointer opers);
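// A minimal sketch (not part of the generated declarations): request a return
// output before import, then fetch it from the results. "logits" is a hypothetical
// node name in the GraphDef; `graph`, `graphDef`, and `opts` are assumed in scope.
public static void exampleFetchReturnOutputs(TF_Graph graph, TF_Buffer graphDef,
                                             TF_ImportGraphDefOptions opts) {
    TF_Status status = TF_NewStatus();
    TF_ImportGraphDefOptionsAddReturnOutput(opts, "logits", 0);
    TF_ImportGraphDefResults results =
            TF_GraphImportGraphDefWithResults(graph, graphDef, opts, status);
    if (TF_GetCode(status) == TF_OK) {
        IntPointer numOutputs = new IntPointer(1);
        TF_Output outputs = new TF_Output((Pointer)null); // address filled in by the call
        TF_ImportGraphDefResultsReturnOutputs(results, numOutputs, outputs);
        System.out.println("fetched " + numOutputs.get() + " return output(s)");
        TF_DeleteImportGraphDefResults(results); // also invalidates `outputs`
    }
    TF_DeleteStatus(status);
}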
// Fetches any input mappings requested via
// TF_ImportGraphDefOptionsAddInputMapping() that didn't appear in the GraphDef
// and weren't used as input to any node in the imported graph def. The number
// of fetched mappings is returned in `num_missing_unused_input_mappings`. The
// array of each mapping's source node name is returned in `src_names`, and the
// array of each mapping's source index is returned in `src_indexes`.
//
// `*src_names`, `*src_indexes`, and the memory backing each string in
// `src_names` are owned by and have the lifetime of `results`.
public static native void TF_ImportGraphDefResultsMissingUnusedInputMappings(
    TF_ImportGraphDefResults results, IntPointer num_missing_unused_input_mappings,
    @Cast("const char***") @ByPtrPtr PointerPointer src_names,
    @Cast("int**") PointerPointer src_indexes);
public static native void TF_ImportGraphDefResultsMissingUnusedInputMappings(
    TF_ImportGraphDefResults results, IntPointer num_missing_unused_input_mappings,
    @Cast("const char***") @ByPtrPtr PointerPointer src_names,
    @ByPtrPtr IntPointer src_indexes);
public static native void TF_ImportGraphDefResultsMissingUnusedInputMappings(
    TF_ImportGraphDefResults results, IntBuffer num_missing_unused_input_mappings,
    @Cast("const char***") @ByPtrPtr PointerPointer src_names,
    @ByPtrPtr IntBuffer src_indexes);
public static native void TF_ImportGraphDefResultsMissingUnusedInputMappings(
    TF_ImportGraphDefResults results, int[] num_missing_unused_input_mappings,
    @Cast("const char***") @ByPtrPtr PointerPointer src_names,
    @ByPtrPtr int... src_indexes);

// Deletes a results object returned by TF_GraphImportGraphDefWithResults().
public static native void TF_DeleteImportGraphDefResults(
    TF_ImportGraphDefResults results);

// Import the graph serialized in `graph_def` into `graph`. Returns nullptr and
// a bad status on error. Otherwise, returns a populated
// TF_ImportGraphDefResults instance. The returned instance must be deleted via
// TF_DeleteImportGraphDefResults().
public static native TF_ImportGraphDefResults TF_GraphImportGraphDefWithResults(
    TF_Graph graph, @Const TF_Buffer graph_def,
    @Const TF_ImportGraphDefOptions options, TF_Status status);

// Import the graph serialized in `graph_def` into `graph`.
// Convenience function for when only return outputs are needed.
//
// `num_return_outputs` must be the number of return outputs added (i.e. the
// result of TF_ImportGraphDefOptionsNumReturnOutputs()). If
// `num_return_outputs` is non-zero, `return_outputs` must be of length
// `num_return_outputs`. Otherwise it can be null.
public static native void TF_GraphImportGraphDefWithReturnOutputs(
    TF_Graph graph, @Const TF_Buffer graph_def,
    @Const TF_ImportGraphDefOptions options, TF_Output return_outputs,
    int num_return_outputs, TF_Status status);

// Import the graph serialized in `graph_def` into `graph`.
// Convenience function for when no results are needed.
public static native void TF_GraphImportGraphDef(
    TF_Graph graph, @Const TF_Buffer graph_def,
    @Const TF_ImportGraphDefOptions options, TF_Status status);
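// A minimal round-trip sketch (not part of the generated declarations): serialize
// one graph to a GraphDef buffer and import it into another graph under a prefix.
public static void exampleImportGraphDef(TF_Graph src, TF_Graph dst) {
    TF_Status status = TF_NewStatus();
    TF_Buffer graphDef = TF_NewBuffer();
    TF_GraphToGraphDef(src, graphDef, status);
    if (TF_GetCode(status) == TF_OK) {
        TF_ImportGraphDefOptions opts = TF_NewImportGraphDefOptions();
        TF_ImportGraphDefOptionsSetPrefix(opts, "copy");
        TF_GraphImportGraphDef(dst, graphDef, opts, status);
        if (TF_GetCode(status) != TF_OK) {
            System.err.println(TF_Message(status).getString());
        }
        TF_DeleteImportGraphDefOptions(opts);
    }
    TF_DeleteBuffer(graphDef);
    TF_DeleteStatus(status);
}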
// Otherwise, status is set to the encountered error and `g` is unmodified.
public static native void TF_GraphCopyFunction(TF_Graph g, @Const TF_Function func,
    @Const TF_Function grad, TF_Status status);

// Returns the number of TF_Functions registered in `g`.
public static native int TF_GraphNumFunctions(TF_Graph g);

// Fills in `funcs` with the TF_Function* registered in `g`.
// `funcs` must point to an array of TF_Function* of length at least
// `max_func`. In usual usage, max_func should be set to the result of
// TF_GraphNumFunctions(g). In this case, all the functions registered in
// `g` will be returned. Otherwise, an unspecified subset is returned.
//
// If successful, returns the number of TF_Function* successfully set in
// `funcs` and sets status to OK. The caller takes ownership of
// all the returned TF_Functions. They must be deleted with TF_DeleteFunction.
// On error, returns 0, sets status to the encountered error, and the contents
// of funcs will be undefined.
public static native int TF_GraphGetFunctions(TF_Graph g,
    @Cast("TF_Function**") PointerPointer funcs, int max_func, TF_Status status);
public static native int TF_GraphGetFunctions(TF_Graph g,
    @ByPtrPtr TF_Function funcs, int max_func, TF_Status status);

// Note: The following function may fail on very large protos in the future.
public static native void TF_OperationToNodeDef(TF_Operation oper,
    TF_Buffer output_node_def, TF_Status status);

public static class TF_WhileParams extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_WhileParams(Pointer p) { super(p); }

    // The number of inputs to the while loop, i.e. the number of loop variables.
    // This is the size of cond_inputs, body_inputs, and body_outputs.
    @MemberGetter public native int ninputs();

    // The while condition graph. The inputs are the current values of the loop
    // variables. The output should be a scalar boolean.
    @MemberGetter public native TF_Graph cond_graph();
    @MemberGetter public native @Const TF_Output cond_inputs();
    public native @ByRef TF_Output cond_output();
    public native TF_WhileParams cond_output(TF_Output cond_output);

    // The loop body graph. The inputs are the current values of the loop
    // variables. The outputs are the updated values of the loop variables.
    @MemberGetter public native TF_Graph body_graph();
    @MemberGetter public native @Const TF_Output body_inputs();
    @MemberGetter public native TF_Output body_outputs();

    // Unique null-terminated name for this while loop. This is used as a prefix
    // for created operations.
    @MemberGetter public native @Cast("const char*") BytePointer name();
}

// Creates a TF_WhileParams for creating a while loop in `g`. `inputs` are
// outputs that already exist in `g` used as initial values for the loop
// variables.
//
// The returned TF_WhileParams will have all fields initialized except
// `cond_output`, `body_outputs`, and `name`. The `body_outputs` buffer will be
// allocated to size `ninputs`. The caller should build `cond_graph` and
// `body_graph` starting from the inputs, and store the final outputs in
// `cond_output` and `body_outputs`.
//
// If `status` is OK, the caller must call either TF_FinishWhile or
// TF_AbortWhile on the returned TF_WhileParams. If `status` isn't OK, the
// returned TF_WhileParams is not valid, and the caller should not call
// TF_FinishWhile() or TF_AbortWhile().
//
// Missing functionality (TODO):
// - Gradients
// - Reference-type inputs
// - Directly referencing external tensors from the cond/body graphs (this is
//   possible in the Python API)
public static native @ByVal TF_WhileParams TF_NewWhile(TF_Graph g, TF_Output inputs,
    int ninputs, TF_Status status);

// Builds the while loop specified by `params` and returns the output tensors of
// the while loop in `outputs`. `outputs` should be allocated to size
// `params.ninputs`.
//
// `params` is no longer valid once this returns.
//
// Either this or TF_AbortWhile() must be called after a successful
// TF_NewWhile() call.
public static native void TF_FinishWhile(@Const TF_WhileParams params,
    TF_Status status, TF_Output outputs);

// Frees `params`'s resources without building a while loop. `params` is no
// longer valid after this returns. Either this or TF_FinishWhile() must be
// called after a successful TF_NewWhile() call.
public static native void TF_AbortWhile(@Const TF_WhileParams params);
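// A minimal lifecycle sketch (not part of the generated declarations): create
// TF_WhileParams and release them with TF_AbortWhile. A real caller would build
// the cond/body graphs and call TF_FinishWhile instead. `initialValue` is a
// hypothetical operation providing the loop variable's initial value.
public static void exampleWhileLifecycle(TF_Graph graph, TF_Operation initialValue) {
    TF_Status status = TF_NewStatus();
    TF_Output inputs = new TF_Output(1);
    inputs.oper(initialValue).index(0);
    TF_WhileParams params = TF_NewWhile(graph, inputs, 1, status);
    if (TF_GetCode(status) == TF_OK) {
        // Populate params.cond_graph()/params.body_graph() here, set cond_output
        // and body_outputs, then call TF_FinishWhile(params, status, outputs).
        TF_AbortWhile(params); // frees the params without building the loop
    }
    TF_DeleteStatus(status);
}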
// Adds operations to compute the partial derivatives of the sum of `y`s w.r.t `x`s,
// i.e., d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...
//
// `dx` are used as initial gradients (which represent the symbolic partial
// derivatives of some loss function `L` w.r.t. `y`).
// `dx` must be nullptr or have size `ny`.
// If `dx` is nullptr, the implementation will use dx of `OnesLike` for all
// shapes in `y`.
// The partial derivatives are returned in `dy`. `dy` should be allocated to
// size `nx`.
//
// Gradient nodes are automatically named under the "gradients/" prefix. To
// guarantee name uniqueness, subsequent calls to the same graph will
// append an incremental tag to the prefix: "gradients_1/", "gradients_2/", ...
// See TF_AddGradientsWithPrefix, which provides a means to specify a custom
// name prefix for operations added to a graph to compute the gradients.
//
// WARNING: This function does not yet support all the gradients that Python
// supports. See
// https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md
// for instructions on how to add more C++ gradients.
public static native void TF_AddGradients(TF_Graph g, TF_Output y, int ny,
    TF_Output x, int nx, TF_Output dx, TF_Status status, TF_Output dy);

// Adds operations to compute the partial derivatives of the sum of `y`s w.r.t `x`s,
// i.e., d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...
// This is a variant of TF_AddGradients that allows the caller to pass a custom
// name prefix to the operations added to a graph to compute the gradients.
//
// `dx` are used as initial gradients (which represent the symbolic partial
// derivatives of some loss function `L` w.r.t. `y`).
// `dx` must be nullptr or have size `ny`.
// If `dx` is nullptr, the implementation will use dx of `OnesLike` for all
// shapes in `y`.
// The partial derivatives are returned in `dy`. `dy` should be allocated to
// size `nx`.
// `prefix` names the scope into which all gradients operations are being added.
// `prefix` must be unique within the provided graph, otherwise this operation
// will fail. If `prefix` is nullptr, the default prefixing behaviour takes
// place, see TF_AddGradients for more details.
//
// WARNING: This function does not yet support all the gradients that Python
// supports. See
// https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md
// for instructions on how to add more C++ gradients.
public static native void TF_AddGradientsWithPrefix(TF_Graph g,
    @Cast("const char*") BytePointer prefix, TF_Output y, int ny, TF_Output x,
    int nx, TF_Output dx, TF_Status status, TF_Output dy);
public static native void TF_AddGradientsWithPrefix(TF_Graph g, String prefix,
    TF_Output y, int ny, TF_Output x, int nx, TF_Output dx, TF_Status status,
    TF_Output dy);
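// A minimal sketch (not part of the generated declarations): add gradient nodes
// for d(loss)/d(param). `lossOp` and `paramOp` are hypothetical operations assumed
// to be in `graph`; both expose output 0.
public static void exampleAddGradients(TF_Graph graph, TF_Operation lossOp,
                                       TF_Operation paramOp) {
    TF_Status status = TF_NewStatus();
    TF_Output y = new TF_Output();
    y.oper(lossOp).index(0);
    TF_Output x = new TF_Output();
    x.oper(paramOp).index(0);
    TF_Output dy = new TF_Output(1); // receives the partial derivative
    TF_AddGradients(graph, y, 1, x, 1, null, status, dy); // null dx => OnesLike
    if (TF_GetCode(status) != TF_OK) {
        System.err.println(TF_Message(status).getString());
    }
    TF_DeleteStatus(status);
}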
// Create a TF_Function from a TF_Graph.
//
// Params:
//  fn_body - the graph whose operations (or subset of whose operations) will be
//            converted to TF_Function.
//  fn_name - the name of the new TF_Function. Should match the operation
//            name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*.
//            If `append_hash_to_fn_name` is false, `fn_name` must be distinct
//            from other function and operation names (at least those
//            registered in graphs where this function will be used).
//  append_hash_to_fn_name - Must be 0 or 1. If set to 1, the actual name
//                           of the function will be `fn_name` appended with
//                           '_<hash_of_this_function>'.
//                           If set to 0, the function's name will be `fn_name`.
//  num_opers - `num_opers` contains the number of elements in the `opers` array
//              or a special value of -1 meaning that no array is given.
//              The distinction between an empty array of operations and no
//              array of operations is necessary to distinguish the case of
//              creating a function with no body (e.g. identity or permutation)
//              and the case of creating a function whose body contains all
//              the nodes in the graph (except for the automatic skipping, see
//              below).
//  opers - Array of operations to become the body of the function or null.
//          - If no array is given (`num_opers` = -1), all the
//            operations in `fn_body` will become part of the function
//            except operations referenced in `inputs`. These operations
//            must have a single output (these operations are typically
//            placeholders created for the sole purpose of representing
//            an input. We can relax this constraint if there are
//            compelling use cases).
//          - If an array is given (`num_opers` >= 0), all operations
//            in it will become part of the function. In particular, no
//            automatic skipping of dummy input operations is performed.
//  ninputs - number of elements in `inputs` array
//  inputs - array of TF_Outputs that specify the inputs to the function.
//           If `ninputs` is zero (the function takes no inputs), `inputs`
//           can be null. The names used for function inputs are normalized
//           names of the operations (usually placeholders) pointed to by
//           `inputs`. These operation names should start with a letter.
//           Normalization will convert all letters to lowercase and
//           non-alphanumeric characters to '_' to make resulting names match
//           the "[a-z][a-z0-9_]*" pattern for operation argument names.
//           `inputs` cannot contain the same tensor twice.
//  noutputs - number of elements in `outputs` array
//  outputs - array of TF_Outputs that specify the outputs of the function.
//            If `noutputs` is zero (the function returns no outputs), `outputs`
//            can be null. `outputs` can contain the same tensor more than once.
//  output_names - The names of the function's outputs. `output_names` array
//                 must either have the same length as `outputs`
//                 (i.e. `noutputs`) or be null. In the former case,
//                 the names should match the regular expression for ArgDef
//                 names - "[a-z][a-z0-9_]*". In the latter case,
//                 names for outputs will be generated automatically.
//  opts - various options for the function, e.g. XLA's inlining control.
//  description - optional human-readable description of this function.
//  status - Set to OK on success and an appropriate error on failure.
//
// Note that when the same TF_Output is listed as both an input and an output,
// the corresponding function's output will be equal to this input,
// instead of the original node's output.
//
// Callers must also satisfy the following constraints:
// - `inputs` cannot refer to TF_Outputs within a control flow context. For
//   example, one cannot use the output of "switch" node as input.
// - `inputs` and `outputs` cannot have reference types. Reference types are
//   not exposed through C API and are being replaced with Resources. We support
//   reference types inside function's body to support legacy code. Do not
//   use them in new code.
// - Every node in the function's body must have all of its inputs (including
//   control inputs). In other words, for every node in the body, each input
//   must be either listed in `inputs` or must come from another node in
//   the body. In particular, it is an error to have a control edge going from
//   a node outside of the body into a node in the body. This applies to control
//   edges going from nodes referenced in `inputs` to nodes in the body when
//   the former nodes are not in the body (automatically skipped or not
//   included in explicitly specified body).
//
// Returns:
//  On success, a newly created TF_Function instance. It must be deleted by
//  calling TF_DeleteFunction.
//
//  On failure, null.
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, @Cast("const char*") BytePointer fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Cast("const TF_Operation*const*") PointerPointer opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") PointerPointer output_names,
    @Const TF_FunctionOptions opts, @Cast("const char*") BytePointer description,
    TF_Status status);
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, @Cast("const char*") BytePointer fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Const @ByPtrPtr TF_Operation opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") @ByPtrPtr BytePointer output_names,
    @Const TF_FunctionOptions opts, @Cast("const char*") BytePointer description,
    TF_Status status);
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, String fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Const @ByPtrPtr TF_Operation opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") @ByPtrPtr ByteBuffer output_names,
    @Const TF_FunctionOptions opts, String description, TF_Status status);
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, @Cast("const char*") BytePointer fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Const @ByPtrPtr TF_Operation opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") @ByPtrPtr byte[] output_names,
    @Const TF_FunctionOptions opts, @Cast("const char*") BytePointer description,
    TF_Status status);
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, String fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Const @ByPtrPtr TF_Operation opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") @ByPtrPtr BytePointer output_names,
    @Const TF_FunctionOptions opts, String description, TF_Status status);
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, @Cast("const char*") BytePointer fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Const @ByPtrPtr TF_Operation opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") @ByPtrPtr ByteBuffer output_names,
    @Const TF_FunctionOptions opts, @Cast("const char*") BytePointer description,
    TF_Status status);
public static native TF_Function TF_GraphToFunction(
    @Const TF_Graph fn_body, String fn_name,
    @Cast("unsigned char") byte append_hash_to_fn_name, int num_opers,
    @Const @ByPtrPtr TF_Operation opers, int ninputs,
    @Const TF_Output inputs, int noutputs, @Const TF_Output outputs,
    @Cast("const char*const*") @ByPtrPtr byte[] output_names,
    @Const TF_FunctionOptions opts, String description, TF_Status status);

// Returns the name of the graph function.
// The return value points to memory that is only usable until the next
// mutation to *func.
public static native @Cast("const char*") BytePointer TF_FunctionName(TF_Function func);

// Write out a serialized representation of `func` (as a FunctionDef protocol
// message) to `output_func_def` (allocated by TF_NewBuffer()).
// `output_func_def`'s underlying buffer will be freed when TF_DeleteBuffer()
// is called.
//
// May fail on very large graphs in the future.
public static native void TF_FunctionToFunctionDef(TF_Function func,
    TF_Buffer output_func_def, TF_Status status);

// Construct and return the function whose FunctionDef representation is
// serialized in `proto`. `proto_len` must equal the number of bytes
// pointed to by `proto`.
// Returns:
//  On success, a newly created TF_Function instance. It must be deleted by
//  calling TF_DeleteFunction.
//
//  On failure, null.
public static native TF_Function TF_FunctionImportFunctionDef(
    @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status);

// Sets function attribute named `attr_name` to value stored in `proto`.
// If this attribute is already set to another value, it is overridden.
// `proto` should point to a sequence of bytes of length `proto_len`
// representing a binary serialization of an AttrValue protocol buffer.
public static native void TF_FunctionSetAttrValueProto(TF_Function func,
    @Cast("const char*") BytePointer attr_name, @Const Pointer proto,
    @Cast("size_t") long proto_len, TF_Status status);
public static native void TF_FunctionSetAttrValueProto(TF_Function func,
    String attr_name, @Const Pointer proto, @Cast("size_t") long proto_len,
    TF_Status status);

// Sets `output_attr_value` to the binary-serialized AttrValue proto
// representation of the value of the `attr_name` attr of `func`.
// If the `attr_name` attribute is not present, status is set to an error.
public static native void TF_FunctionGetAttrValueProto(
    TF_Function func, @Cast("const char*") BytePointer attr_name,
    TF_Buffer output_attr_value, TF_Status status);
public static native void TF_FunctionGetAttrValueProto(
    TF_Function func, String attr_name, TF_Buffer output_attr_value,
    TF_Status status);

// Frees the memory used by the `func` struct.
// TF_DeleteFunction is a noop if `func` is null.
// Deleting a function does not remove it from any graphs it was copied to.
public static native void TF_DeleteFunction(TF_Function func);
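// A minimal sketch (not part of the generated declarations): serialize a
// TF_Function to a FunctionDef buffer and print its name. `func` is assumed to be
// a valid function, e.g. one returned by TF_GraphToFunction above.
public static void exampleSerializeFunction(TF_Function func) {
    TF_Status status = TF_NewStatus();
    TF_Buffer funcDef = TF_NewBuffer();
    TF_FunctionToFunctionDef(func, funcDef, status);
    if (TF_GetCode(status) == TF_OK) {
        System.out.println(TF_FunctionName(func).getString()
                + ": " + funcDef.length() + " bytes");
    }
    TF_DeleteBuffer(funcDef);
    TF_DeleteStatus(status);
}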
// Attempts to evaluate `output`. This will only be possible if `output` doesn't
// depend on any graph inputs (this function is safe to call if this isn't the
// case though).
//
// If the evaluation is successful, this function returns true and `output`'s
// value is returned in `result`. Otherwise returns false. An error status is
// returned if something is wrong with the graph or input. Note that this may
// return false even if no error status is set.
public static native @Cast("unsigned char") byte TF_TryEvaluateConstant(TF_Graph graph,
    @ByVal TF_Output output, @Cast("TF_Tensor**") PointerPointer result,
    TF_Status status);
public static native @Cast("unsigned char") byte TF_TryEvaluateConstant(TF_Graph graph,
    @ByVal TF_Output output, @ByPtrPtr TF_Tensor result, TF_Status status);

// TODO(josh11b): Register OpDef, available to all operations added
// to this graph.

// --------------------------------------------------------------------------
// API for driving Graph execution.

// Return a new execution session with the associated graph, or NULL on
// error. Does not take ownership of any input parameters.
//
// *`graph` must be a valid graph (not deleted or nullptr). `graph` will be
// kept alive for the lifetime of the returned TF_Session. New nodes can still
// be added to `graph` after this call.
public static native TF_Session TF_NewSession(TF_Graph graph,
    @Const TF_SessionOptions opts, TF_Status status);
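// A minimal lifecycle sketch (not part of the generated declarations): create a
// session for a graph, then close and delete it, checking the status at each step.
public static void exampleSessionLifecycle(TF_Graph graph) {
    TF_Status status = TF_NewStatus();
    TF_SessionOptions opts = TF_NewSessionOptions();
    TF_Session session = TF_NewSession(graph, opts, status);
    if (TF_GetCode(status) == TF_OK) {
        // ... run the graph with TF_SessionRun() ...
        TF_CloseSession(session, status);
        TF_DeleteSession(session, status);
    }
    TF_DeleteSessionOptions(opts);
    TF_DeleteStatus(status);
}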
public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, @Cast("const char*") BytePointer export_dir, @Cast("const char*const*") PointerPointer tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, @Cast("const char*") BytePointer export_dir, @Cast("const char*const*") @ByPtrPtr BytePointer tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, String export_dir, @Cast("const char*const*") @ByPtrPtr ByteBuffer tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, @Cast("const char*") BytePointer export_dir, @Cast("const char*const*") @ByPtrPtr byte[] tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, String export_dir, @Cast("const char*const*") @ByPtrPtr BytePointer tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, @Cast("const char*") BytePointer export_dir, @Cast("const char*const*") @ByPtrPtr ByteBuffer tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); public static native @Platform(not="android") TF_Session TF_LoadSessionFromSavedModel( @Const TF_SessionOptions session_options, @Const TF_Buffer run_options, String export_dir, @Cast("const char*const*") @ByPtrPtr byte[] tags, int tags_len, TF_Graph graph, TF_Buffer meta_graph_def, TF_Status status); // Close a session. // // Contacts any other processes associated with the session, if applicable. // May not be called after TF_DeleteSession(). public static native void TF_CloseSession(TF_Session arg0, TF_Status status); // Destroy a session object. // // Even if error information is recorded in *status, this call discards all // local resources associated with the session. The session may not be used // during or after this call (and the session drops its reference to the // corresponding graph). public static native void TF_DeleteSession(TF_Session arg0, TF_Status status); // Run the graph associated with the session starting with the supplied inputs // (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]). // // Any NULL and non-NULL value combinations for (`run_options`, // `run_metadata`) are valid. // // - `run_options` may be NULL, in which case it will be ignored; or // non-NULL, in which case it must point to a `TF_Buffer` containing the // serialized representation of a `RunOptions` protocol buffer. // - `run_metadata` may be NULL, in which case it will be ignored; or // non-NULL, in which case it must point to an empty, freshly allocated // `TF_Buffer` that may be updated to contain the serialized representation // of a `RunMetadata` protocol buffer. 
// Run the graph associated with the session starting with the supplied inputs
// (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]).
//
// Any NULL and non-NULL value combinations for (`run_options`,
// `run_metadata`) are valid.
//
// - `run_options` may be NULL, in which case it will be ignored; or
//   non-NULL, in which case it must point to a `TF_Buffer` containing the
//   serialized representation of a `RunOptions` protocol buffer.
// - `run_metadata` may be NULL, in which case it will be ignored; or
//   non-NULL, in which case it must point to an empty, freshly allocated
//   `TF_Buffer` that may be updated to contain the serialized representation
//   of a `RunMetadata` protocol buffer.
//
// The caller retains ownership of `input_values` (which can be deleted using
// TF_DeleteTensor). The caller also retains ownership of `run_options` and/or
// `run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on
// them.
//
// On success, the tensors corresponding to outputs[0,noutputs-1] are placed in
// output_values[]. Ownership of the elements of output_values[] is transferred
// to the caller, which must eventually call TF_DeleteTensor on them.
//
// On failure, output_values[] contains NULLs.
public static native void TF_SessionRun(
    TF_Session session, @Const TF_Buffer run_options,
    @Const TF_Output inputs, @Cast("TF_Tensor*const*") PointerPointer input_values,
    int ninputs,
    @Const TF_Output outputs, @Cast("TF_Tensor**") PointerPointer output_values,
    int noutputs,
    @Cast("const TF_Operation*const*") PointerPointer target_opers, int ntargets,
    TF_Buffer run_metadata, TF_Status arg11);
public static native void TF_SessionRun(
    TF_Session session, @Const TF_Buffer run_options,
    @Const TF_Output inputs, @ByPtrPtr TF_Tensor input_values, int ninputs,
    @Const TF_Output outputs, @ByPtrPtr TF_Tensor output_values, int noutputs,
    @Const @ByPtrPtr TF_Operation target_opers, int ntargets,
    TF_Buffer run_metadata, TF_Status arg11);

// Set up the graph with the intended feeds (inputs) and fetches (outputs) for a
// sequence of partial run calls.
//
// On success, returns a handle that is used for subsequent PRun calls. The
// handle should be deleted with TF_DeletePRunHandle when it is no longer
// needed.
//
// On failure, out_status contains a tensorflow::Status with an error
// message. *handle is set to nullptr.
public static native void TF_SessionPRunSetup(
    TF_Session arg0,
    @Const TF_Output inputs, int ninputs,
    @Const TF_Output outputs, int noutputs,
    @Cast("const TF_Operation*const*") PointerPointer target_opers, int ntargets,
    @Cast("const char**") PointerPointer handle, TF_Status arg8);
public static native void TF_SessionPRunSetup(
    TF_Session arg0,
    @Const TF_Output inputs, int ninputs,
    @Const TF_Output outputs, int noutputs,
    @Const @ByPtrPtr TF_Operation target_opers, int ntargets,
    @Cast("const char**") @ByPtrPtr BytePointer handle, TF_Status arg8);
public static native void TF_SessionPRunSetup(
    TF_Session arg0,
    @Const TF_Output inputs, int ninputs,
    @Const TF_Output outputs, int noutputs,
    @Const @ByPtrPtr TF_Operation target_opers, int ntargets,
    @Cast("const char**") @ByPtrPtr ByteBuffer handle, TF_Status arg8);
public static native void TF_SessionPRunSetup(
    TF_Session arg0,
    @Const TF_Output inputs, int ninputs,
    @Const TF_Output outputs, int noutputs,
    @Const @ByPtrPtr TF_Operation target_opers, int ntargets,
    @Cast("const char**") @ByPtrPtr byte[] handle, TF_Status arg8);
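// A minimal sketch (not part of the generated declarations): feed one scalar
// float into a node named "input" and fetch one value from a node named "output"
// (both names hypothetical), using the PointerPointer overload of TF_SessionRun.
public static void exampleSessionRun(TF_Session session, TF_Graph graph) {
    TF_Status status = TF_NewStatus();
    TF_Output feed = new TF_Output(1);
    feed.oper(TF_GraphOperationByName(graph, "input")).index(0);
    TF_Output fetch = new TF_Output(1);
    fetch.oper(TF_GraphOperationByName(graph, "output")).index(0);

    TF_Tensor in = TF_AllocateTensor(TF_FLOAT, (LongPointer)null, 0, 4); // scalar
    new FloatPointer(TF_TensorData(in)).put(3.0f);

    PointerPointer inputValues = new PointerPointer(1).put(0, in);
    PointerPointer outputValues = new PointerPointer(1);
    TF_SessionRun(session, null, feed, inputValues, 1, fetch, outputValues, 1,
                  null, 0, null, status);
    if (TF_GetCode(status) == TF_OK) {
        TF_Tensor out = new TF_Tensor(outputValues.get(0));
        System.out.println(new FloatPointer(TF_TensorData(out)).get());
        TF_DeleteTensor(out); // ownership of outputs transfers to the caller
    }
    TF_DeleteTensor(in); // the caller retains ownership of the inputs
    TF_DeleteStatus(status);
}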
// Continue to run the graph with additional feeds and fetches. The
// execution state is uniquely identified by the handle.
public static native void TF_SessionPRun(
    TF_Session arg0, @Cast("const char*") BytePointer handle,
    @Const TF_Output inputs, @Cast("TF_Tensor*const*") PointerPointer input_values,
    int ninputs,
    @Const TF_Output outputs, @Cast("TF_Tensor**") PointerPointer output_values,
    int noutputs,
    @Cast("const TF_Operation*const*") PointerPointer target_opers, int ntargets,
    TF_Status arg10);
public static native void TF_SessionPRun(
    TF_Session arg0, @Cast("const char*") BytePointer handle,
    @Const TF_Output inputs, @ByPtrPtr TF_Tensor input_values, int ninputs,
    @Const TF_Output outputs, @ByPtrPtr TF_Tensor output_values, int noutputs,
    @Const @ByPtrPtr TF_Operation target_opers, int ntargets, TF_Status arg10);
public static native void TF_SessionPRun(
    TF_Session arg0, String handle,
    @Const TF_Output inputs, @ByPtrPtr TF_Tensor input_values, int ninputs,
    @Const TF_Output outputs, @ByPtrPtr TF_Tensor output_values, int noutputs,
    @Const @ByPtrPtr TF_Operation target_opers, int ntargets, TF_Status arg10);

// Deletes a handle allocated by TF_SessionPRunSetup.
// Once called, no more calls to TF_SessionPRun should be made.
public static native void TF_DeletePRunHandle(@Cast("const char*") BytePointer handle);
public static native void TF_DeletePRunHandle(String handle);

// --------------------------------------------------------------------------
// The deprecated session API. Please switch to the above instead of
// TF_ExtendGraph(). This deprecated API can be removed at any time without
// notice.
public static native TF_DeprecatedSession TF_NewDeprecatedSession(
    @Const TF_SessionOptions arg0, TF_Status status);
public static native void TF_CloseDeprecatedSession(TF_DeprecatedSession arg0,
    TF_Status status);
public static native void TF_DeleteDeprecatedSession(TF_DeprecatedSession arg0,
    TF_Status status);
public static native void TF_Reset(@Const TF_SessionOptions opt,
    @Cast("const char**") PointerPointer containers, int ncontainers,
    TF_Status status);
public static native void TF_Reset(@Const TF_SessionOptions opt,
    @Cast("const char**") @ByPtrPtr BytePointer containers, int ncontainers,
    TF_Status status);
public static native void TF_Reset(@Const TF_SessionOptions opt,
    @Cast("const char**") @ByPtrPtr ByteBuffer containers, int ncontainers,
    TF_Status status);
public static native void TF_Reset(@Const TF_SessionOptions opt,
    @Cast("const char**") @ByPtrPtr byte[] containers, int ncontainers,
    TF_Status status);

// Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and
// add the nodes in that GraphDef to the graph for the session.
//
// Prefer use of TF_Session and TF_GraphImportGraphDef over this.
public static native void TF_ExtendGraph(TF_DeprecatedSession arg0,
    @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status arg3);

// See TF_SessionRun() above.
public static native void TF_Run(TF_DeprecatedSession arg0,
    @Const TF_Buffer run_options,
    @Cast("const char**") PointerPointer input_names,
    @Cast("TF_Tensor**") PointerPointer inputs, int ninputs,
    @Cast("const char**") PointerPointer output_names,
    @Cast("TF_Tensor**") PointerPointer outputs, int noutputs,
    @Cast("const char**") PointerPointer target_oper_names, int ntargets,
    TF_Buffer run_metadata, TF_Status arg11);
public static native void TF_Run(TF_DeprecatedSession arg0,
    @Const TF_Buffer run_options,
    @Cast("const char**") @ByPtrPtr BytePointer input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr BytePointer output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr BytePointer target_oper_names, int ntargets,
    TF_Buffer run_metadata, TF_Status arg11);
public static native void TF_Run(TF_DeprecatedSession arg0,
    @Const TF_Buffer run_options,
    @Cast("const char**") @ByPtrPtr ByteBuffer input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer target_oper_names, int ntargets,
    TF_Buffer run_metadata, TF_Status arg11);
public static native void TF_Run(TF_DeprecatedSession arg0,
    @Const TF_Buffer run_options,
    @Cast("const char**") @ByPtrPtr byte[] input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr byte[] output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr byte[] target_oper_names, int ntargets,
    TF_Buffer run_metadata, TF_Status arg11);

// See TF_SessionPRunSetup() above.
public static native void TF_PRunSetup(TF_DeprecatedSession arg0,
    @Cast("const char**") PointerPointer input_names, int ninputs,
    @Cast("const char**") PointerPointer output_names, int noutputs,
    @Cast("const char**") PointerPointer target_oper_names, int ntargets,
    @Cast("const char**") PointerPointer handle, TF_Status arg8);
public static native void TF_PRunSetup(TF_DeprecatedSession arg0,
    @Cast("const char**") @ByPtrPtr BytePointer input_names, int ninputs,
    @Cast("const char**") @ByPtrPtr BytePointer output_names, int noutputs,
    @Cast("const char**") @ByPtrPtr BytePointer target_oper_names, int ntargets,
    @Cast("const char**") @ByPtrPtr BytePointer handle, TF_Status arg8);
public static native void TF_PRunSetup(TF_DeprecatedSession arg0,
    @Cast("const char**") @ByPtrPtr ByteBuffer input_names, int ninputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer output_names, int noutputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer target_oper_names, int ntargets,
    @Cast("const char**") @ByPtrPtr ByteBuffer handle, TF_Status arg8);
public static native void TF_PRunSetup(TF_DeprecatedSession arg0,
    @Cast("const char**") @ByPtrPtr byte[] input_names, int ninputs,
    @Cast("const char**") @ByPtrPtr byte[] output_names, int noutputs,
    @Cast("const char**") @ByPtrPtr byte[] target_oper_names, int ntargets,
    @Cast("const char**") @ByPtrPtr byte[] handle, TF_Status arg8);

// See TF_SessionPRun above.
public static native void TF_PRun(TF_DeprecatedSession arg0,
    @Cast("const char*") BytePointer handle,
    @Cast("const char**") PointerPointer input_names,
    @Cast("TF_Tensor**") PointerPointer inputs, int ninputs,
    @Cast("const char**") PointerPointer output_names,
    @Cast("TF_Tensor**") PointerPointer outputs, int noutputs,
    @Cast("const char**") PointerPointer target_oper_names, int ntargets,
    TF_Status arg10);
public static native void TF_PRun(TF_DeprecatedSession arg0,
    @Cast("const char*") BytePointer handle,
    @Cast("const char**") @ByPtrPtr BytePointer input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr BytePointer output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr BytePointer target_oper_names, int ntargets,
    TF_Status arg10);
public static native void TF_PRun(TF_DeprecatedSession arg0, String handle,
    @Cast("const char**") @ByPtrPtr ByteBuffer input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer target_oper_names, int ntargets,
    TF_Status arg10);
public static native void TF_PRun(TF_DeprecatedSession arg0,
    @Cast("const char*") BytePointer handle,
    @Cast("const char**") @ByPtrPtr byte[] input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr byte[] output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr byte[] target_oper_names, int ntargets,
    TF_Status arg10);
public static native void TF_PRun(TF_DeprecatedSession arg0, String handle,
    @Cast("const char**") @ByPtrPtr BytePointer input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr BytePointer output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr BytePointer target_oper_names, int ntargets,
    TF_Status arg10);
public static native void TF_PRun(TF_DeprecatedSession arg0,
    @Cast("const char*") BytePointer handle,
    @Cast("const char**") @ByPtrPtr ByteBuffer input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr ByteBuffer target_oper_names, int ntargets,
    TF_Status arg10);
public static native void TF_PRun(TF_DeprecatedSession arg0, String handle,
    @Cast("const char**") @ByPtrPtr byte[] input_names,
    @ByPtrPtr TF_Tensor inputs, int ninputs,
    @Cast("const char**") @ByPtrPtr byte[] output_names,
    @ByPtrPtr TF_Tensor outputs, int noutputs,
    @Cast("const char**") @ByPtrPtr byte[] target_oper_names, int ntargets,
    TF_Status arg10);

// Lists all devices in a TF_Session.
//
// Caller takes ownership of the returned TF_DeviceList* which must eventually
// be freed with a call to TF_DeleteDeviceList.
public static native TF_DeviceList TF_SessionListDevices(TF_Session session,
    TF_Status status);

// Lists all devices in a TF_Session.
//
// Caller takes ownership of the returned TF_DeviceList* which must eventually
// be freed with a call to TF_DeleteDeviceList.
public static native TF_DeviceList TF_DeprecatedSessionListDevices(
    TF_DeprecatedSession session, TF_Status status);

// Deallocates the device list.
public static native void TF_DeleteDeviceList(TF_DeviceList list);

// Counts the number of elements in the device list.
public static native int TF_DeviceListCount(@Const TF_DeviceList list);

// Retrieves the full name of the device (e.g. /job:worker/replica:0/...)
// The return value will be a pointer to a null terminated string. The caller
// must not modify or delete the string. It will be deallocated upon a call to
// TF_DeleteDeviceList.
//
// If index is out of bounds, an error code will be set in the status object,
// and a null pointer will be returned.
public static native @Cast("const char*") BytePointer TF_DeviceListName(
    @Const TF_DeviceList list, int index, TF_Status status);

// Retrieves the type of the device at the given index.
//
// The caller must not modify or delete the string. It will be deallocated upon
// a call to TF_DeleteDeviceList.
//
// If index is out of bounds, an error code will be set in the status object,
// and a null pointer will be returned.
public static native @Cast("const char*") BytePointer TF_DeviceListType(
    @Const TF_DeviceList list, int index, TF_Status status);

// Retrieve the amount of memory associated with a given device.
//
// If index is out of bounds, an error code will be set in the status object,
// and -1 will be returned.
public static native @Cast("int64_t") long TF_DeviceListMemoryBytes(
    @Const TF_DeviceList list, int index, TF_Status status);

// Retrieve the incarnation number of a given device.
//
// If index is out of bounds, an error code will be set in the status object,
// and 0 will be returned.
public static native @Cast("uint64_t") long TF_DeviceListIncarnation(
    @Const TF_DeviceList list, int index, TF_Status status);
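// A minimal sketch (not part of the generated declarations): enumerate the devices
// visible to a session and print each one's full name and type.
public static void exampleListDevices(TF_Session session) {
    TF_Status status = TF_NewStatus();
    TF_DeviceList devices = TF_SessionListDevices(session, status);
    if (TF_GetCode(status) == TF_OK) {
        for (int i = 0; i < TF_DeviceListCount(devices); i++) {
            System.out.println(TF_DeviceListName(devices, i, status).getString()
                    + " (" + TF_DeviceListType(devices, i, status).getString() + ")");
        }
        TF_DeleteDeviceList(devices);
    }
    TF_DeleteStatus(status);
}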
// --------------------------------------------------------------------------
// Load plugins containing custom ops and kernels

// TF_Library holds information about dynamically loaded TensorFlow plugins.

// Load the library specified by library_filename and register the ops and
// kernels present in that library.
//
// Pass "library_filename" to a platform-specific mechanism for dynamically
// loading a library. The rules for determining the exact location of the
// library are platform-specific and are not documented here.
//
// On success, place OK in status and return the newly created library handle.
// The caller owns the library handle.
//
// On failure, place an error status in status and return NULL.
public static native TF_Library TF_LoadLibrary(
    @Cast("const char*") BytePointer library_filename, TF_Status status);
public static native TF_Library TF_LoadLibrary(String library_filename,
    TF_Status status);

// Get the OpList of OpDefs defined in the library pointed to by lib_handle.
//
// Returns a TF_Buffer. The memory pointed to by the result is owned by
// lib_handle. The data in the buffer will be the serialized OpList proto for
// ops defined in the library.
public static native @ByVal TF_Buffer TF_GetOpList(TF_Library lib_handle);

// Frees the memory associated with the library handle.
// Does NOT unload the library.
public static native void TF_DeleteLibraryHandle(TF_Library lib_handle);

// Get the OpList of all OpDefs defined in this address space.
// Returns a TF_Buffer, ownership of which is transferred to the caller
// (and can be freed using TF_DeleteBuffer).
//
// The data in the buffer will be the serialized OpList proto for ops registered
// in this address space.
public static native TF_Buffer TF_GetAllOpList();
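// A minimal sketch (not part of the generated declarations): fetch the serialized
// OpList of every op registered in this address space and release the buffer.
public static void exampleDumpOpList() {
    TF_Buffer ops = TF_GetAllOpList();
    System.out.println("serialized OpList: " + ops.length() + " bytes");
    TF_DeleteBuffer(ops); // ownership was transferred to the caller
}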
// TF_ApiDefMap encapsulates a collection of API definitions for an operation.
//
// This object maps the name of a TensorFlow operation to a description of the
// API to generate for it, as defined by the ApiDef protocol buffer (
// https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto)
//
// The ApiDef messages are typically used to generate convenience wrapper
// functions for TensorFlow operations in various language bindings.

// Creates a new TF_ApiDefMap instance.
//
// Params:
//  op_list_buffer - TF_Buffer instance containing serialized OpList
//                   protocol buffer. (See
//                   https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto
//                   for the OpList proto definition).
//  status - Set to OK on success and an appropriate error on failure.
public static native TF_ApiDefMap TF_NewApiDefMap(TF_Buffer op_list_buffer,
    TF_Status status);

// Deallocates a TF_ApiDefMap.
public static native void TF_DeleteApiDefMap(TF_ApiDefMap apimap);

// Add ApiDefs to the map.
//
// `text` corresponds to a text representation of an ApiDefs protocol message.
// (https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto).
//
// The provided ApiDefs will be merged with existing ones in the map, with
// precedence given to the newly added version in case of conflicts with
// previous calls to TF_ApiDefMapPut.
public static native void TF_ApiDefMapPut(TF_ApiDefMap api_def_map,
    @Cast("const char*") BytePointer text, @Cast("size_t") long text_len,
    TF_Status status);
public static native void TF_ApiDefMapPut(TF_ApiDefMap api_def_map, String text,
    @Cast("size_t") long text_len, TF_Status status);

// Returns a serialized ApiDef protocol buffer for the TensorFlow operation
// named `name`.
public static native TF_Buffer TF_ApiDefMapGet(TF_ApiDefMap api_def_map,
    @Cast("const char*") BytePointer name, @Cast("size_t") long name_len,
    TF_Status status);
public static native TF_Buffer TF_ApiDefMapGet(TF_ApiDefMap api_def_map,
    String name, @Cast("size_t") long name_len, TF_Status status);

// --------------------------------------------------------------------------
// Kernel definition information.

// Returns a serialized KernelList protocol buffer containing KernelDefs for all
// registered kernels.
public static native TF_Buffer TF_GetAllRegisteredKernels(TF_Status status);

// Returns a serialized KernelList protocol buffer containing KernelDefs for all
// kernels registered for the operation named `name`.
public static native TF_Buffer TF_GetRegisteredKernelsForOp(
    @Cast("const char*") BytePointer name, TF_Status status);
public static native TF_Buffer TF_GetRegisteredKernelsForOp(
    String name, TF_Status status);
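// A minimal sketch (not part of the generated declarations): query the registered
// kernels for one op ("MatMul") and for the whole runtime, and print the sizes of
// the serialized KernelList protos.
public static void exampleDumpKernels() {
    TF_Status status = TF_NewStatus();
    TF_Buffer forOp = TF_GetRegisteredKernelsForOp("MatMul", status);
    if (TF_GetCode(status) == TF_OK) {
        System.out.println("MatMul kernels: " + forOp.length() + " bytes");
        TF_DeleteBuffer(forOp);
    }
    TF_Buffer all = TF_GetAllRegisteredKernels(status);
    if (TF_GetCode(status) == TF_OK) {
        System.out.println("all kernels: " + all.length() + " bytes");
        TF_DeleteBuffer(all);
    }
    TF_DeleteStatus(status);
}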
// #ifdef __cplusplus /* end extern "C" */
// #endif

// #endif  // TENSORFLOW_C_C_API_H_


// Parsed from tensorflow/c/c_api_internal.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_C_C_API_INTERNAL_H_
// #define TENSORFLOW_C_C_API_INTERNAL_H_

// #include "tensorflow/c/c_api.h"

// #include
// #include
// #include
// #include
// #include
// #ifndef __ANDROID__
// #include "tensorflow/core/framework/op_gen_lib.h"
// #endif
// #include "tensorflow/core/common_runtime/shape_refiner.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/framework/tensor_shape.h"
// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/graph/graph_constructor.h"
// #include "tensorflow/core/graph/node_builder.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/types.h"
// #include "tensorflow/core/public/session.h"

// namespace tensorflow

// Internal structures used by the C API. These are likely to change and should
// not be depended on.

public static class TF_Status extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_Status {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_Status() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Status(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Status(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Status position(long position) {
        return (TF_Status)super.position(position);
    }

    public native @ByRef Status status(); public native TF_Status status(Status status);
}

public static class TF_Tensor extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_Tensor {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_Tensor() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Tensor(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Tensor(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Tensor position(long position) {
        return (TF_Tensor)super.position(position);
    }

    public native @Cast("TF_DataType") int dtype(); public native TF_Tensor dtype(int dtype);
    public native @ByRef TensorShape shape(); public native TF_Tensor shape(TensorShape shape);
    public native TensorBuffer buffer(); public native TF_Tensor buffer(TensorBuffer buffer);
}

public static class TF_SessionOptions extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_SessionOptions {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_SessionOptions() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_SessionOptions(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_SessionOptions(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_SessionOptions position(long position) {
        return (TF_SessionOptions)super.position(position);
    }

    public native @ByRef SessionOptions options(); public native TF_SessionOptions options(SessionOptions options);
}

public static class TF_DeprecatedSession extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_DeprecatedSession() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_DeprecatedSession(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_DeprecatedSession(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_DeprecatedSession position(long position) {
        return (TF_DeprecatedSession)super.position(position);
    }

    public native Session session(); public native TF_DeprecatedSession session(Session session);
}

public static class TF_Library extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TF_Library() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Library(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Library(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Library position(long position) {
        return (TF_Library)super.position(position);
    }

    public native Pointer lib_handle(); public native TF_Library lib_handle(Pointer lib_handle);
    public native @ByRef TF_Buffer op_list(); public native TF_Library op_list(TF_Buffer op_list);
}

@NoOffset public static class TF_Graph extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_Graph {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_Graph(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TF_Graph(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public TF_Graph position(long position) {
        return (TF_Graph)super.position(position);
    }
    public TF_Graph() { super((Pointer)null); allocate(); }
    private native void allocate();

    public native @ByRef @Cast("tensorflow::mutex*") Pointer mu(); public native TF_Graph mu(Pointer mu);
    public native @MemberGetter @ByRef Graph graph();

    // Runs shape inference.
    public native @MemberGetter @ByRef ShapeRefiner refiner();

    // Maps from name of an operation to the Node* in 'graph'.
    public native @ByRef StringNodeMap name_map(); public native TF_Graph name_map(StringNodeMap name_map);

    // The keys of this map are all the active sessions using this graph. Each
    // value records whether the graph has been mutated since the corresponding
    // session has been run (this is detected in RecordMutation function). If the
    // string is empty, no mutation has occurred. Otherwise the string is a
    // description of the mutation suitable for returning to the user.
    //
    // Sessions are added to this map in TF_NewSession, and removed in
    // TF_DeleteSession.
// TF_Graph may only / must be deleted when // sessions.size() == 0 && delete_requested == true // // TODO(b/74949947): mutations currently trigger a warning instead of a bad // status, this should be reverted when possible. public native @ByRef TF_SessionStringMap sessions(); public native TF_Graph sessions(TF_SessionStringMap sessions); public native @Cast("bool") boolean delete_requested(); public native TF_Graph delete_requested(boolean delete_requested); // set true by TF_DeleteGraph // Used to link graphs contained in TF_WhileParams to the parent graph that // will eventually contain the full while loop. public native TF_Graph parent(); public native TF_Graph parent(TF_Graph parent); public native TF_Output parent_inputs(); public native TF_Graph parent_inputs(TF_Output parent_inputs); } @NoOffset public static class TF_OperationDescription extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_OperationDescription(Pointer p) { super(p); } public TF_OperationDescription(TF_Graph g, @Cast("const char*") BytePointer op_type, @Cast("const char*") BytePointer node_name) { super((Pointer)null); allocate(g, op_type, node_name); } private native void allocate(TF_Graph g, @Cast("const char*") BytePointer op_type, @Cast("const char*") BytePointer node_name); public TF_OperationDescription(TF_Graph g, String op_type, String node_name) { super((Pointer)null); allocate(g, op_type, node_name); } private native void allocate(TF_Graph g, String op_type, String node_name); public native @ByRef NodeBuilder node_builder(); public native TF_OperationDescription node_builder(NodeBuilder node_builder); public native TF_Graph graph(); public native TF_OperationDescription graph(TF_Graph graph); public native @ByRef StringSet colocation_constraints(); public native TF_OperationDescription colocation_constraints(StringSet colocation_constraints); } public static class TF_Operation extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_Operation(Pointer p) { super(p); } public native @MemberGetter @ByRef Node node(); } @NoOffset public static class TF_Session extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_Session { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_Session(Pointer p) { super(p); } public TF_Session(Session s, TF_Graph g) { super((Pointer)null); allocate(s, g); } private native void allocate(Session s, TF_Graph g); public native Session session(); public native TF_Session session(Session session); @MemberGetter public native TF_Graph graph(); public native @ByRef @Cast("tensorflow::mutex*") Pointer mu(); public native TF_Session mu(Pointer mu); public native int last_num_graph_nodes(); public native TF_Session last_num_graph_nodes(int last_num_graph_nodes); // If true, TF_SessionRun and similar methods will call // ExtendSessionGraphHelper before running the graph (this is the default // public behavior). Can be set to false if the caller needs to call // ExtendSessionGraphHelper manually. public native @MemberGetter @ByRef @Cast("std::atomic*") Pointer extend_before_run(); } public static class TF_ImportGraphDefOptions extends org.bytedeco.javacpp.helper.tensorflow.AbstractTF_ImportGraphDefOptions { static { Loader.load(); } /** Default native constructor. */ public TF_ImportGraphDefOptions() { super((Pointer)null); allocate(); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public TF_ImportGraphDefOptions(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_ImportGraphDefOptions(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public TF_ImportGraphDefOptions position(long position) { return (TF_ImportGraphDefOptions)super.position(position); } public native @ByRef ImportGraphDefOptions opts(); public native TF_ImportGraphDefOptions opts(ImportGraphDefOptions opts); // Backing memory for TensorId fields in opts. // TODO(skyewm): it'd be better if ImportGraphDefOptions owned this. public native @ByRef StringList tensor_id_data(); public native TF_ImportGraphDefOptions tensor_id_data(StringList tensor_id_data); } public static class TF_ImportGraphDefResults extends Pointer { static { Loader.load(); } /** Default native constructor. */ public TF_ImportGraphDefResults() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TF_ImportGraphDefResults(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_ImportGraphDefResults(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public TF_ImportGraphDefResults position(long position) { return (TF_ImportGraphDefResults)super.position(position); } public native @StdVector TF_Output return_tensors(); public native TF_ImportGraphDefResults return_tensors(TF_Output return_tensors); public native @Cast("TF_Operation**") @StdVector PointerPointer return_nodes(); public native TF_ImportGraphDefResults return_nodes(PointerPointer return_nodes); @MemberGetter public native @Cast("const char**") @StdVector PointerPointer missing_unused_key_names(); public native @StdVector IntPointer missing_unused_key_indexes(); public native TF_ImportGraphDefResults missing_unused_key_indexes(IntPointer missing_unused_key_indexes); // Backing memory for missing_unused_key_names values. public native @ByRef StringList missing_unused_key_names_data(); public native TF_ImportGraphDefResults missing_unused_key_names_data(StringList missing_unused_key_names_data); } public static class TF_DeviceList extends Pointer { static { Loader.load(); } /** Default native constructor. */ public TF_DeviceList() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TF_DeviceList(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TF_DeviceList(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public TF_DeviceList position(long position) { return (TF_DeviceList)super.position(position); } public native @StdVector DeviceAttributes response(); public native TF_DeviceList response(DeviceAttributes response); } public static class TF_Function extends Pointer { static { Loader.load(); } /** Default native constructor. */ public TF_Function() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TF_Function(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TF_Function(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TF_Function position(long position) { return (TF_Function)super.position(position); }

    public native @ByRef FunctionDef fdef(); public native TF_Function fdef(FunctionDef fdef);
}

@NoOffset public static class TF_ApiDefMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TF_ApiDefMap(Pointer p) { super(p); }

    public TF_ApiDefMap(@Const @ByRef OpList op_list) { super((Pointer)null); allocate(op_list); }
    private native void allocate(@Const @ByRef OpList op_list);

// #ifndef __ANDROID__
    public native @ByRef ApiDefMap api_def_map(); public native TF_ApiDefMap api_def_map(ApiDefMap api_def_map);
// #endif
    public native @Cast("bool") boolean update_docs_called(); public native TF_ApiDefMap update_docs_called(boolean update_docs_called);
    public native @ByRef @Cast("tensorflow::mutex*") Pointer lock(); public native TF_ApiDefMap lock(Pointer lock);
}

@Namespace("tensorflow") public static class TensorCApi extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TensorCApi() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TensorCApi(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorCApi(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TensorCApi position(long position) { return (TensorCApi)super.position(position); }

    public static native TensorBuffer Buffer(@Const @ByRef Tensor tensor);
    public static native @ByVal Tensor MakeTensor(@Cast("TF_DataType") int type, @Const @ByRef TensorShape shape, TensorBuffer buf);
}

@Namespace("tensorflow") public static native @ByVal Status TF_TensorToTensor(@Const TF_Tensor src, Tensor dst);

@Namespace("tensorflow") public static native TF_Tensor TF_TensorFromTensor(@Const @ByRef Tensor src, TF_Status status);

@Namespace("tensorflow") public static native @ByVal Status MessageToBuffer(@Cast("const tensorflow::protobuf::Message*") @ByRef MessageLite in, TF_Buffer out);

// Set the shapes and types of the output's handle.
//
// The lengths of the arrays pointed to by `shapes`, `ranks`, and `types` must
// all be equal to `num_shapes_and_types`. If `ranks[i] != -1` (i.e., if the
// rank is known), then it must be equal to the length of `shapes[i]`; if
// `ranks[i] == -1`, then `shapes[i]` may be nullptr.
//
// TODO(akshayka): Implement a corresponding getter method.
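// E.g., a hypothetical Java-side sketch of the call declared below (the
// variable names are invented for illustration; `graph`, `output`, and
// `status` are assumed to be live TF_Graph/TF_Output/TF_Status instances):
//
//   LongPointer shape0 = new LongPointer(new long[] {2, 3});  // known rank-2 shape
//   LongPointer shape1 = new LongPointer(new long[] {5});     // known rank-1 shape
//   PointerPointer shapes = new PointerPointer(shape0, shape1);
//   IntPointer ranks = new IntPointer(new int[] {2, 1});      // -1 would mean "rank unknown"
//   IntPointer types = new IntPointer(new int[] {TF_RESOURCE, TF_RESOURCE});
//   TF_GraphSetOutputHandleShapesAndTypes(graph, output, 2, shapes, ranks, types, status);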
@Namespace("tensorflow") public static native void TF_GraphSetOutputHandleShapesAndTypes(TF_Graph graph, @ByVal TF_Output output, int num_shapes_and_types, @Cast("const int64_t**") PointerPointer shapes, @Const IntPointer ranks, @Cast("const TF_DataType*") IntPointer types, TF_Status status); @Namespace("tensorflow") public static native void TF_GraphSetOutputHandleShapesAndTypes(TF_Graph graph, @ByVal TF_Output output, int num_shapes_and_types, @Cast("const int64_t**") @ByPtrPtr LongPointer shapes, @Const IntPointer ranks, @Cast("const TF_DataType*") IntPointer types, TF_Status status); @Namespace("tensorflow") public static native void TF_GraphSetOutputHandleShapesAndTypes(TF_Graph graph, @ByVal TF_Output output, int num_shapes_and_types, @Cast("const int64_t**") @ByPtrPtr LongBuffer shapes, @Const IntBuffer ranks, @Cast("const TF_DataType*") IntBuffer types, TF_Status status); @Namespace("tensorflow") public static native void TF_GraphSetOutputHandleShapesAndTypes(TF_Graph graph, @ByVal TF_Output output, int num_shapes_and_types, @Cast("const int64_t**") @ByPtrPtr long[] shapes, @Const int[] ranks, @Cast("const TF_DataType*") int[] types, TF_Status status); @Namespace("tensorflow") public static native void RecordMutation(TF_Graph graph, @Const @ByRef TF_Operation op, @Cast("const char*") BytePointer mutation_type); @Namespace("tensorflow") public static native void RecordMutation(TF_Graph graph, @Const @ByRef TF_Operation op, String mutation_type); @Namespace("tensorflow") public static native @Cast("bool") boolean ExtendSessionGraphHelper(TF_Session session, TF_Status status); // end namespace tensorflow // #endif // TENSORFLOW_C_C_API_INTERNAL_H_ // Parsed from tensorflow/c/python_api.h /* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_C_PYTHON_API_H_ // #define TENSORFLOW_C_PYTHON_API_H_ // #include // #include "tensorflow/c/c_api.h" // These functions can be removed without notice. They exist to facilitate some // refactoring of graph construction code in the Python API. @Namespace("tensorflow") public static native void AddControlInput(TF_Graph graph, TF_Operation op, TF_Operation input); // Changes an attr value in the node_def Protocol Buffer and sets a status upon // completion. 
@Namespace("tensorflow") public static native void SetAttr(TF_Graph graph, TF_Operation op, @Cast("const char*") BytePointer attr_name, TF_Buffer attr_value_proto, TF_Status status); @Namespace("tensorflow") public static native void SetAttr(TF_Graph graph, TF_Operation op, String attr_name, TF_Buffer attr_value_proto, TF_Status status); @Namespace("tensorflow") public static native void SetRequestedDevice(TF_Graph graph, TF_Operation op, @Cast("const char*") BytePointer device); @Namespace("tensorflow") public static native void SetRequestedDevice(TF_Graph graph, TF_Operation op, String device); @Namespace("tensorflow") public static native void UpdateEdge(TF_Graph graph, @ByVal TF_Output new_src, @ByVal TF_Input dst, TF_Status status); @Namespace("tensorflow") public static native void RemoveAllControlInputs(TF_Graph graph, TF_Operation op); // Sets whether ops missing a shape inference function should trigger an // error. The default is true. @Namespace("tensorflow") public static native void SetRequireShapeInferenceFns(TF_Graph graph, @Cast("bool") boolean require); // Extends `session` with any new operations added to its associated graph. // Usually this happens automatically in TF_SessionRun. After this is called, // TF_SessionRun will no longer extend the session on every call. // // We expose this here to allow fine-grained synchronization in multi-threaded // workloads, which is required since the Python implementation depends on the // above mutation methods. This allows us to prevent modifications to nodes in // the graph after the session has been made aware of them. @Namespace("tensorflow") public static native void ExtendSession(TF_Session session, TF_Status status); // Returns the serialized CppShapeInferenceResult::HandleData proto for // `output` if its a resource or variant tensor, or otherwise returns the empty // string. @Namespace("tensorflow") public static native @StdString BytePointer GetHandleShapeAndType(TF_Graph graph, @ByVal TF_Output output); // Sets `output` based on `proto`, which should be a serialized // CppShapeInferenceResult::HandleData proto. `output` should be a resource // or variant tensor. // NOTE(skyewm): `proto` is passed a void*/size_t pair instead of a std::string // because I couldn't get SWIG to work otherwise. @Namespace("tensorflow") public static native void SetHandleShapeAndType(TF_Graph graph, @ByVal TF_Output output, @Const Pointer proto, @Cast("size_t") long proto_len, TF_Status status); // namespace tensorflow // #endif // TENSORFLOW_C_PYTHON_API_H_ // Parsed from tensorflow/core/framework/op_def_builder.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Class and associated machinery for specifying an Op's OpDef and shape // inference function for Op registration. 
// #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_DEF_BUILDER_H_
// #define TENSORFLOW_CORE_FRAMEWORK_OP_DEF_BUILDER_H_

// #include
// #include

// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/macros.h"

@Namespace("tensorflow") @NoOffset public static class OpRegistrationData extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpRegistrationData(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OpRegistrationData(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OpRegistrationData position(long position) { return (OpRegistrationData)super.position(position); }

  public OpRegistrationData() { super((Pointer)null); allocate(); }
  private native void allocate();
  public OpRegistrationData(@Const @ByRef OpDef def) { super((Pointer)null); allocate(def); }
  private native void allocate(@Const @ByRef OpDef def);
  public OpRegistrationData(@Const @ByRef OpDef def, @Cast("const tensorflow::OpShapeInferenceFn*") @ByRef Pointer fn,
                     @Cast("bool") boolean is_function/*=false*/) { super((Pointer)null); allocate(def, fn, is_function); }
  private native void allocate(@Const @ByRef OpDef def, @Cast("const tensorflow::OpShapeInferenceFn*") @ByRef Pointer fn,
                     @Cast("bool") boolean is_function/*=false*/);
  public OpRegistrationData(@Const @ByRef OpDef def, @Cast("const tensorflow::OpShapeInferenceFn*") @ByRef Pointer fn) { super((Pointer)null); allocate(def, fn); }
  private native void allocate(@Const @ByRef OpDef def, @Cast("const tensorflow::OpShapeInferenceFn*") @ByRef Pointer fn);

  public native @ByRef OpDef op_def(); public native OpRegistrationData op_def(OpDef op_def);
  @MemberSetter public native OpRegistrationData shape_inference_fn(@ByVal ShapeInferenceFn shape_inference_fn);
  public native @Cast("bool") boolean is_function_op(); public native OpRegistrationData is_function_op(boolean is_function_op);
}

// Builder class passed to the REGISTER_OP() macro.
@Namespace("tensorflow") @NoOffset public static class OpDefBuilder extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpDefBuilder(Pointer p) { super(p); }

  // Constructs an OpDef with just the name field set.
  public OpDefBuilder(@StdString BytePointer op_name) { super((Pointer)null); allocate(op_name); }
  private native void allocate(@StdString BytePointer op_name);
  public OpDefBuilder(@StdString String op_name) { super((Pointer)null); allocate(op_name); }
  private native void allocate(@StdString String op_name);

  // Adds an attr to this OpDefBuilder (and returns *this). The spec has
  // format "<name>:<type>" or "<name>:<type>=<default>"
  // where <name> matches regexp [a-zA-Z][a-zA-Z0-9_]*
  // (by convention only using capital letters for attrs that can be inferred)
  // <type> can be:
  //   "string", "int", "float", "bool", "type", "shape", or "tensor"
  //   "numbertype", "realnumbertype", "quantizedtype"
  //       (meaning "type" with a restriction on valid values)
  //   "{int32,int64}" or "{realnumbertype,quantizedtype,string}"
  //       (meaning "type" with a restriction containing unions of value types)
  //   "{\"foo\", \"bar\n baz\"}", or "{'foo', 'bar\n baz'}"
  //       (meaning "string" with a restriction on valid values)
  //   "list(string)", ..., "list(tensor)", "list(numbertype)", ...
  //       (meaning lists of the above types)
  //   "int >= 2" (meaning "int" with a restriction on valid values)
  //   "list(string) >= 2", "list(int) >= 2"
  //       (meaning "list(string)" / "list(int)" with length at least 2)
  // <default>, if included, should use the Proto text format
  // of <type>. For lists use [a, b, c] format.
  //
  // Note that any attr specifying the length of an input or output will
  // get a default minimum of 1 unless the >= # syntax is used.
  //
  // TODO(josh11b): Perhaps support restrictions and defaults as optional
  // extra arguments to Attr() instead of encoding them in the spec string.
  // TODO(josh11b): Would like to have better dtype handling for tensor attrs:
  // * Ability to say the type of an input/output matches the type of
  //   the tensor.
  // * Ability to restrict the type of the tensor like the existing
  //   restrictions for type attrs.
  // Perhaps by linking the type of the tensor to a type attr?
  public native @ByRef OpDefBuilder Attr(@StdString BytePointer spec);
  public native @ByRef OpDefBuilder Attr(@StdString String spec);

  // Adds an input or output to this OpDefBuilder (and returns *this).
  // The spec has form "<name>:<type-expr>" or "<name>:Ref(<type-expr>)"
  // where <name> matches regexp [a-z][a-z0-9_]* and <type-expr> can be:
  // * For a single tensor: <type>
  // * For a sequence of tensors with the same type: <number>*<type>
  // * For a sequence of tensors with different types: <type-list>
  // Where:
  //   <type> is either one of "float", "int32", "string", ...
  //          or the name of an attr (see above) with type "type".
  //   <number> is the name of an attr with type "int".
  //   <type-list> is the name of an attr with type "list(type)".
  // TODO(josh11b): Indicate Ref() via an optional argument instead of
  // in the spec?
  // TODO(josh11b): SparseInput() and SparseOutput() matching the Python
  // handling?
  public native @ByRef OpDefBuilder Input(@StdString BytePointer spec);
  public native @ByRef OpDefBuilder Input(@StdString String spec);
  public native @ByRef OpDefBuilder Output(@StdString BytePointer spec);
  public native @ByRef OpDefBuilder Output(@StdString String spec);

  // Turns on the indicated boolean flag in this OpDefBuilder (and
  // returns *this).
  public native @ByRef OpDefBuilder SetIsCommutative();
  public native @ByRef OpDefBuilder SetIsAggregate();
  public native @ByRef OpDefBuilder SetIsStateful();
  public native @ByRef OpDefBuilder SetAllowsUninitializedInput();

  // Deprecate the op at a certain GraphDef version.
  public native @ByRef OpDefBuilder Deprecated(int version, @StdString BytePointer explanation);
  public native @ByRef OpDefBuilder Deprecated(int version, @StdString String explanation);

  // Adds docs to this OpDefBuilder (and returns *this).
  // Docs have the format:
  //   <1-line summary>
  //   <rest of the description (potentially many lines)>
  //   <name-of-attr-input-or-output>: <description of name>
  //   <name-of-attr-input-or-output>: <description of name;
  //     if long, indent the description on subsequent lines>
  // Where <name-of-attr-input-or-output> is the name of an attr, input, or
  // output. Please wrap docs at 72 columns so that it may be indented in the
  // generated output. For tensor inputs or outputs (not attrs), you
  // may start the description with an "=" (like name:= <description>)
  // to suppress the automatically-generated type documentation in
  // generated output.
// #ifndef TF_LEAN_BINARY
  public native @ByRef OpDefBuilder Doc(@StdString BytePointer text);
  public native @ByRef OpDefBuilder Doc(@StdString String text);
// #else
// #endif

  // Sets the shape function to be used for shape inference.
  //
  // Note that currently (October 2016), python code still requires a
  // RegisterShape call to invoke this; see call_cpp_shape_fn in
  // python/framework/common_shapes.py
  public static class Fn_InferenceContext extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public Fn_InferenceContext(Pointer p) { super(p); } protected Fn_InferenceContext() { allocate(); } private native void allocate(); public native @ByVal Status call(InferenceContext arg0); } public native @ByRef OpDefBuilder SetShapeFn(Fn_InferenceContext fn); // Sets op_reg_data->op_def to the requested OpDef and // op_reg_data->shape_inference_fn to the requested shape inference function, // or returns an error. // Must be called after all of the above methods. // // Note that OpDefBuilder only reports parsing errors. You should also // call ValidateOpDef() to detect other problems. public native @ByVal Status Finalize(OpRegistrationData op_reg_data); } // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_OP_DEF_BUILDER_H_ // Parsed from tensorflow/core/framework/op_def_util.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // TODO(josh11b): Probably not needed for OpKernel authors, so doesn't // need to be as publicly accessible as other files in framework/. // #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_DEF_UTIL_H_ // #define TENSORFLOW_CORE_FRAMEWORK_OP_DEF_UTIL_H_ // #include // #include "tensorflow/core/framework/api_def.pb.h" // #include "tensorflow/core/framework/op_def.pb.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/platform/protobuf.h" // Performs a consistency check across the fields of the op_def. @Namespace("tensorflow") public static native @ByVal Status ValidateOpDef(@Const @ByRef OpDef op_def); // Check if an op is deprecated at the given GraphDef version. If the op is // deprecated at a future version, a warning will be logged. @Namespace("tensorflow") public static native @ByVal Status CheckOpDeprecation(@Const @ByRef OpDef op_def, int graph_def_version); // Validates that attr_value satisfies the type and constraints from attr. // REQUIRES: attr has already been validated. @Namespace("tensorflow") public static native @ByVal Status ValidateAttrValue(@Const @ByRef AttrValue attr_value, @Cast("const tensorflow::OpDef::AttrDef*") @ByRef OpDef_AttrDef attr); // The following search through op_def for an attr with the indicated name. // Returns nullptr if no such attr is found. @Namespace("tensorflow") public static native @Cast("const tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttr(@StringPiece BytePointer name, @Const @ByRef OpDef op_def); @Namespace("tensorflow") public static native @Cast("const tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttr(@StringPiece String name, @Const @ByRef OpDef op_def); @Namespace("tensorflow") public static native @Cast("tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttrMutable(@StringPiece BytePointer name, OpDef op_def); @Namespace("tensorflow") public static native @Cast("tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttrMutable(@StringPiece String name, OpDef op_def); // Searches op_def for input argument with the indicated name. // Returns nullptr if no such attr is found. 
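// E.g. (a sketch; `op_def` is assumed to be a populated OpDef, and JavaCPP
// maps a returned nullptr to Java null):
//
//   OpDef_ArgDef arg = FindInputArg("x", op_def);
//   if (arg == null) { /* op_def has no input argument named "x" */ }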
@Namespace("tensorflow") public static native @Cast("const tensorflow::OpDef::ArgDef*") OpDef_ArgDef FindInputArg(@StringPiece BytePointer name, @Const @ByRef OpDef op_def); @Namespace("tensorflow") public static native @Cast("const tensorflow::OpDef::ArgDef*") OpDef_ArgDef FindInputArg(@StringPiece String name, @Const @ByRef OpDef op_def); // Searches api_def for input argument with the indicated name. // Returns nullptr if no such attr is found. @Namespace("tensorflow") public static native @Cast("const tensorflow::ApiDef::Arg*") ApiDef_Arg FindInputArg(@StringPiece BytePointer name, @Const @ByRef ApiDef api_def); @Namespace("tensorflow") public static native @Cast("const tensorflow::ApiDef::Arg*") ApiDef_Arg FindInputArg(@StringPiece String name, @Const @ByRef ApiDef api_def); // Produce a human-readable version of an op_def that is more concise // than a text-format proto. Excludes descriptions. @Namespace("tensorflow") public static native @StdString BytePointer SummarizeOpDef(@Const @ByRef OpDef op_def); // Returns an error if new_op is not backwards-compatible with (more // accepting than) old_op. // REQUIRES: old_op and new_op must pass validation. @Namespace("tensorflow") public static native @ByVal Status OpDefCompatible(@Const @ByRef OpDef old_op, @Const @ByRef OpDef new_op); // Returns an error if any attr in penultimate_op that is not in old_op // has a different default value in new_op. In general it is not safe // to change the default for an attr that has been added to an op. @Namespace("tensorflow") public static native @ByVal Status OpDefAddedDefaultsUnchanged(@Const @ByRef OpDef old_op, @Const @ByRef OpDef penultimate_op, @Const @ByRef OpDef new_op); // Returns an error if the default value for any attr is added/removed/modified // in new_op compared to old_op. @Namespace("tensorflow") public static native @ByVal Status OpDefAttrDefaultsUnchanged(@Const @ByRef OpDef old_op, @Const @ByRef OpDef new_op); // Remove all docs from *op_def / *op_list. @Namespace("tensorflow") public static native void RemoveDescriptionsFromOpDef(OpDef op_def); @Namespace("tensorflow") public static native void RemoveDescriptionsFromOpList(OpList op_list); // Remove docs from *op_def but leave explanations of deprecations. @Namespace("tensorflow") public static native void RemoveNonDeprecationDescriptionsFromOpDef(OpDef op_def); // Returns true if `a1` is equal to `a2`. // Equality includes all the fields. @Namespace("tensorflow") public static native @Cast("bool") boolean AttrDefEqual(@Cast("const tensorflow::OpDef::AttrDef*") @ByRef OpDef_AttrDef a1, @Cast("const tensorflow::OpDef::AttrDef*") @ByRef OpDef_AttrDef a2); // Returns hash of `a` that is consistent with AttrDefEqual. @Namespace("tensorflow") public static native @Cast("tensorflow::uint64") long AttrDefHash(@Cast("const tensorflow::OpDef::AttrDef*") @ByRef OpDef_AttrDef a); // Returns true if all AttrDefs in `a1` equal corresponding AttrDefs in // `a2`. Correspondence is established by name. // Returns hash of `a` that is consistent with RepeatedAttrDefEqual // Returns true if `o1` is equal to `o2`. // Equality includes all the fields. OpDef.attr field is treated as a set. @Namespace("tensorflow") public static native @Cast("bool") boolean OpDefEqual(@Const @ByRef OpDef o1, @Const @ByRef OpDef o2); // Returns hash of `o` that is consistent with AttrDefEqual. 
@Namespace("tensorflow") public static native @Cast("tensorflow::uint64") long OpDefHash(@Const @ByRef OpDef o); // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_OP_DEF_UTIL_H_ // Parsed from tensorflow/core/framework/op.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_H_ // #define TENSORFLOW_CORE_FRAMEWORK_OP_H_ // #include // #include // #include // #include "tensorflow/core/framework/op_def_builder.h" // #include "tensorflow/core/framework/op_def_util.h" // #include "tensorflow/core/framework/selective_registration.h" // #include "tensorflow/core/lib/core/errors.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/strings/str_util.h" // #include "tensorflow/core/lib/strings/strcat.h" // #include "tensorflow/core/platform/logging.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/mutex.h" // #include "tensorflow/core/platform/thread_annotations.h" // #include "tensorflow/core/platform/types.h" // Users that want to look up an OpDef by type name should take an // OpRegistryInterface. Functions accepting a // (const) OpRegistryInterface* may call LookUp() from multiple threads. @Namespace("tensorflow") public static class OpRegistryInterface extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OpRegistryInterface(Pointer p) { super(p); } // Returns an error status and sets *op_reg_data to nullptr if no OpDef is // registered under that name, otherwise returns the registered OpDef. // Caller must not delete the returned pointer. public native @ByVal Status LookUp(@StdString BytePointer op_type_name, @Cast("const tensorflow::OpRegistrationData**") PointerPointer op_reg_data); public native @ByVal Status LookUp(@StdString BytePointer op_type_name, @Const @ByPtrPtr OpRegistrationData op_reg_data); public native @ByVal Status LookUp(@StdString String op_type_name, @Const @ByPtrPtr OpRegistrationData op_reg_data); // Shorthand for calling LookUp to get the OpDef. public native @ByVal Status LookUpOpDef(@StdString BytePointer op_type_name, @Cast("const tensorflow::OpDef**") PointerPointer op_def); public native @ByVal Status LookUpOpDef(@StdString BytePointer op_type_name, @Const @ByPtrPtr OpDef op_def); public native @ByVal Status LookUpOpDef(@StdString String op_type_name, @Const @ByPtrPtr OpDef op_def); } // The standard implementation of OpRegistryInterface, along with a // global singleton used for registering ops via the REGISTER // macros below. Thread-safe. // // Example registration: // OpRegistry::Global()->Register( // [](OpRegistrationData* op_reg_data)->Status { // // Populate *op_reg_data here. // return Status::OK(); // }); @Namespace("tensorflow") @NoOffset public static class OpRegistry extends OpRegistryInterface { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public OpRegistry(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public OpRegistry(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public OpRegistry position(long position) { return (OpRegistry)super.position(position); } public OpRegistry() { super((Pointer)null); allocate(); } private native void allocate(); public native void Register(@Cast("const tensorflow::OpRegistry::OpRegistrationDataFactory*") @ByRef Pointer op_data_factory); public native @ByVal Status LookUp(@StdString BytePointer op_type_name, @Cast("const tensorflow::OpRegistrationData**") PointerPointer op_reg_data); public native @ByVal Status LookUp(@StdString BytePointer op_type_name, @Const @ByPtrPtr OpRegistrationData op_reg_data); public native @ByVal Status LookUp(@StdString String op_type_name, @Const @ByPtrPtr OpRegistrationData op_reg_data); // Fills *ops with all registered OpDefs (except those with names // starting with '_' if include_internal == false) sorted in // ascending alphabetical order. public native void Export(@Cast("bool") boolean include_internal, OpList ops); // Returns ASCII-format OpList for all registered OpDefs (except // those with names starting with '_' if include_internal == false). public native @StdString BytePointer DebugString(@Cast("bool") boolean include_internal); // A singleton available at startup. public static native OpRegistry Global(); // Get all registered ops. public native void GetRegisteredOps(OpDefVector op_defs); // Get all `OpRegistrationData`s. public native void GetOpRegistrationData(@StdVector OpRegistrationData op_data); // Watcher, a function object. // The watcher, if set by SetWatcher(), is called every time an op is // registered via the Register function. The watcher is passed the Status // obtained from building and adding the OpDef to the registry, and the OpDef // itself if it was successfully built. A watcher returns a Status which is in // turn returned as the final registration status. // An OpRegistry object has only one watcher. This interface is not thread // safe, as different clients are free to set the watcher any time. // Clients are expected to atomically perform the following sequence of // operations : // SetWatcher(a_watcher); // Register some ops; // op_registry->ProcessRegistrations(); // SetWatcher(nullptr); // Returns a non-OK status if a non-null watcher is over-written by another // non-null watcher. public native @ByVal Status SetWatcher(@Cast("const tensorflow::OpRegistry::Watcher*") @ByRef Pointer watcher); // Process the current list of deferred registrations. Note that calls to // Export, LookUp and DebugString would also implicitly process the deferred // registrations. Returns the status of the first failed op registration or // Status::OK() otherwise. public native @ByVal Status ProcessRegistrations(); // Defer the registrations until a later call to a function that processes // deferred registrations are made. Normally, registrations that happen after // calls to Export, LookUp, ProcessRegistrations and DebugString are processed // immediately. Call this to defer future registrations. public native void DeferRegistrations(); // Clear the registrations that have been deferred. public native void ClearDeferredRegistrations(); } // An adapter to allow an OpList to be used as an OpRegistryInterface. 
//
// Note that shape inference functions are not passed in to OpListOpRegistry, so
// it will return an unusable shape inference function for every op it supports;
// therefore, it should only be used in contexts where this is okay.
@Namespace("tensorflow") @NoOffset public static class OpListOpRegistry extends OpRegistryInterface {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OpListOpRegistry(Pointer p) { super(p); }

  // Does not take ownership of op_list, *op_list must outlive *this.
  public OpListOpRegistry(@Const OpList op_list) { super((Pointer)null); allocate(op_list); }
  private native void allocate(@Const OpList op_list);
  public native @ByVal Status LookUp(@StdString BytePointer op_type_name,
                       @Cast("const tensorflow::OpRegistrationData**") PointerPointer op_reg_data);
  public native @ByVal Status LookUp(@StdString BytePointer op_type_name,
                       @Const @ByPtrPtr OpRegistrationData op_reg_data);
  public native @ByVal Status LookUp(@StdString String op_type_name,
                       @Const @ByPtrPtr OpRegistrationData op_reg_data);
}

// Support for defining the OpDef (specifying the semantics of the Op and how
// it should be created) and registering it in the OpRegistry::Global()
// registry. Usage:
//
// REGISTER_OP("my_op_name")
//     .Attr("<name>:<type>")
//     .Attr("<name>:<type>=<default>")
//     .Input("<name>:<type-expr>")
//     .Input("<name>:Ref(<type-expr>)")
//     .Output("<name>:<type-expr>")
//     .Doc(R"(
// <1-line summary>
// <rest of the description (potentially many lines)>
// <name-of-attr-input-or-output>: <description of name>
// <name-of-attr-input-or-output>: <description of name;
//   if long, indent the description on subsequent lines>
// )");
//
// Note: .Doc() should be last.
// For details, see the OpDefBuilder class in op_def_builder.h.

// OpDefBuilderWrapper is a templated class that is used in the REGISTER_OP
// calls. This allows the result of REGISTER_OP to be used in chaining, as in
// REGISTER_OP(a).Attr("...").Input("...");, while still allowing selective
// registration to turn the entire call-chain into a no-op.

// Template specialization that forwards all calls to the contained builder.
@Name("tensorflow::register_op::OpDefBuilderWrapper") @NoOffset public static class TrueOpDefBuilderWrapper extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public TrueOpDefBuilderWrapper(Pointer p) { super(p); } public TrueOpDefBuilderWrapper(@Cast("const char*") BytePointer name) { super((Pointer)null); allocate(name); } private native void allocate(@Cast("const char*") BytePointer name); public TrueOpDefBuilderWrapper(String name) { super((Pointer)null); allocate(name); } private native void allocate(String name); public native @ByRef TrueOpDefBuilderWrapper Attr(@StdString BytePointer spec); public native @ByRef TrueOpDefBuilderWrapper Attr(@StdString String spec); public native @ByRef TrueOpDefBuilderWrapper Input(@StdString BytePointer spec); public native @ByRef TrueOpDefBuilderWrapper Input(@StdString String spec); public native @ByRef TrueOpDefBuilderWrapper Output(@StdString BytePointer spec); public native @ByRef TrueOpDefBuilderWrapper Output(@StdString String spec); public native @ByRef TrueOpDefBuilderWrapper SetIsCommutative(); public native @ByRef TrueOpDefBuilderWrapper SetIsAggregate(); public native @ByRef TrueOpDefBuilderWrapper SetIsStateful(); public native @ByRef TrueOpDefBuilderWrapper SetAllowsUninitializedInput(); public native @ByRef TrueOpDefBuilderWrapper Deprecated(int version, @StdString BytePointer explanation); public native @ByRef TrueOpDefBuilderWrapper Deprecated(int version, @StdString String explanation); public native @ByRef TrueOpDefBuilderWrapper Doc(@StdString BytePointer text); public native @ByRef TrueOpDefBuilderWrapper Doc(@StdString String text); public static class Fn_InferenceContext extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Fn_InferenceContext(Pointer p) { super(p); } protected Fn_InferenceContext() { allocate(); } private native void allocate(); public native @ByVal Status call(InferenceContext arg0); } public native @ByRef TrueOpDefBuilderWrapper SetShapeFn( Fn_InferenceContext fn); public native @Const @ByRef OpDefBuilder builder(); } // Template specialization that turns all calls into no-ops. @Name("tensorflow::register_op::OpDefBuilderWrapper") public static class FalseOpDefBuilderWrapper extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public FalseOpDefBuilderWrapper(Pointer p) { super(p); } public FalseOpDefBuilderWrapper(@Cast("const char*") BytePointer name) { super((Pointer)null); allocate(name); } private native void allocate(@Cast("const char*") BytePointer name); public FalseOpDefBuilderWrapper(String name) { super((Pointer)null); allocate(name); } private native void allocate(String name); public native @ByRef FalseOpDefBuilderWrapper Attr(@StringPiece BytePointer spec); public native @ByRef FalseOpDefBuilderWrapper Attr(@StringPiece String spec); public native @ByRef FalseOpDefBuilderWrapper Input(@StringPiece BytePointer spec); public native @ByRef FalseOpDefBuilderWrapper Input(@StringPiece String spec); public native @ByRef FalseOpDefBuilderWrapper Output(@StringPiece BytePointer spec); public native @ByRef FalseOpDefBuilderWrapper Output(@StringPiece String spec); public native @ByRef FalseOpDefBuilderWrapper SetIsCommutative(); public native @ByRef FalseOpDefBuilderWrapper SetIsAggregate(); public native @ByRef FalseOpDefBuilderWrapper SetIsStateful(); public native @ByRef FalseOpDefBuilderWrapper SetAllowsUninitializedInput(); public native @ByRef FalseOpDefBuilderWrapper Deprecated(int arg0, @StringPiece BytePointer arg1); public native @ByRef FalseOpDefBuilderWrapper Deprecated(int arg0, @StringPiece String arg1); public native @ByRef FalseOpDefBuilderWrapper Doc(@StringPiece BytePointer text); public native @ByRef FalseOpDefBuilderWrapper Doc(@StringPiece String text); public static class Fn_InferenceContext extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Fn_InferenceContext(Pointer p) { super(p); } protected Fn_InferenceContext() { allocate(); } private native void allocate(); public native @ByVal Status call(InferenceContext arg0); } public native @ByRef FalseOpDefBuilderWrapper SetShapeFn( Fn_InferenceContext fn); } @Namespace("tensorflow::register_op") public static class OpDefBuilderReceiver extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OpDefBuilderReceiver(Pointer p) { super(p); } // To call OpRegistry::Global()->Register(...), used by the // REGISTER_OP macro below. // Note: These are implicitly converting constructors. public OpDefBuilderReceiver( @Const @ByRef TrueOpDefBuilderWrapper wrapper) { super((Pointer)null); allocate(wrapper); } private native void allocate( @Const @ByRef TrueOpDefBuilderWrapper wrapper); // NOLINT(runtime/explicit) public OpDefBuilderReceiver(@Const @ByRef FalseOpDefBuilderWrapper arg0) { super((Pointer)null); allocate(arg0); } private native void allocate(@Const @ByRef FalseOpDefBuilderWrapper arg0); // NOLINT(runtime/explicit) } // namespace register_op // #define REGISTER_OP(name) REGISTER_OP_UNIQ_HELPER(__COUNTER__, name) // #define REGISTER_OP_UNIQ_HELPER(ctr, name) REGISTER_OP_UNIQ(ctr, name) // #define REGISTER_OP_UNIQ(ctr, name) // static ::tensorflow::register_op::OpDefBuilderReceiver register_op##ctr // TF_ATTRIBUTE_UNUSED = // ::tensorflow::register_op::OpDefBuilderWrapper(name) // The `REGISTER_SYSTEM_OP()` macro acts as `REGISTER_OP()` except // that the op is registered unconditionally even when selective // registration is used. 
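// E.g., since neither REGISTER_OP nor REGISTER_SYSTEM_OP is usable from Java,
// their expansion can be mimicked with the wrapper classes above (a
// hypothetical sketch; the op name and specs are invented):
//
//   TrueOpDefBuilderWrapper w = new TrueOpDefBuilderWrapper("MyOp");
//   w.Input("x: float").Output("y: float");
//   new OpDefBuilderReceiver(w);   // registers the op with OpRegistry::Global()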
// #define REGISTER_SYSTEM_OP(name) // REGISTER_SYSTEM_OP_UNIQ_HELPER(__COUNTER__, name) // #define REGISTER_SYSTEM_OP_UNIQ_HELPER(ctr, name) // REGISTER_SYSTEM_OP_UNIQ(ctr, name) // #define REGISTER_SYSTEM_OP_UNIQ(ctr, name) // static ::tensorflow::register_op::OpDefBuilderReceiver register_op##ctr // TF_ATTRIBUTE_UNUSED = // ::tensorflow::register_op::OpDefBuilderWrapper(name) // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_OP_H_ // Parsed from tensorflow/core/graph/edgeset.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_GRAPH_EDGESET_H_ // #define TENSORFLOW_GRAPH_EDGESET_H_ // #include // #include // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/types.h" // #include "tensorflow/core/platform/logging.h" // An unordered set of edges. Uses very little memory for small sets. // Unlike std::set, EdgeSet does NOT allow mutations during iteration. @Namespace("tensorflow") @NoOffset public static class EdgeSet extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EdgeSet(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public EdgeSet(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public EdgeSet position(long position) { return (EdgeSet)super.position(position); } public EdgeSet() { super((Pointer)null); allocate(); } private native void allocate(); @Name("const_iterator") @Opaque public static class EdgeSetIterator extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public EdgeSetIterator() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EdgeSetIterator(Pointer p) { super(p); } } public native @Cast("bool") boolean empty(); public native @Cast("tensorflow::EdgeSet::size_type") long size(); public native void clear(); public native @ByVal EdgeSetBoolPair insert(@Cast("tensorflow::EdgeSet::value_type") Edge value); public native @Cast("tensorflow::EdgeSet::size_type") long erase(@Cast("tensorflow::EdgeSet::key_type") Edge key); // Caller is not allowed to mutate the EdgeSet while iterating. public native @ByVal EdgeSetIterator begin(); public native @ByVal EdgeSetIterator end(); } @Name("tensorflow::EdgeSet::const_iterator") @NoOffset public static class EdgeSetIterator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EdgeSetIterator(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public EdgeSetIterator(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public EdgeSetIterator position(long position) { return (EdgeSetIterator)super.position(position); } public EdgeSetIterator() { super((Pointer)null); allocate(); } private native void allocate(); public native @ByRef @Name("operator ++") EdgeSetIterator increment(); public native @ByVal @Name("operator ++") EdgeSetIterator increment(int arg0); public native @Cast("const tensorflow::EdgeSet::value_type*") @Name("operator ->") PointerPointer access(); public native @Cast("tensorflow::EdgeSet::value_type") @Name("operator *") Edge multiply(); public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef EdgeSetIterator other); public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef EdgeSetIterator other); } // gcc's set and multiset always use const_iterator since it will otherwise // allow modification of keys. // gcc's set and multiset always use const_iterator since it will otherwise // allow modification of keys. // namespace tensorflow // #endif // TENSORFLOW_GRAPH_EDGESET_H_ // Parsed from tensorflow/core/lib/gtl/iterator_range.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // This provides a very simple, boring adaptor for a begin and end iterator // into a range type. This should be used to build range views that work well // with range based for loops and range based constructors. // // Note that code here follows more standards-based coding conventions as it // is mirroring proposed interfaces for standardization. // // Converted from chandlerc@'s code to Google style by joshl@. // #ifndef TENSORFLOW_LIB_GTL_ITERATOR_RANGE_H_ // #define TENSORFLOW_LIB_GTL_ITERATOR_RANGE_H_ // #include // A range adaptor for a pair of iterators. // // This just wraps two iterators into a range-compatible interface. Nothing // fancy at all. @Name("tensorflow::gtl::iterator_range") @NoOffset public static class NeighborIterRange extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NeighborIterRange(Pointer p) { super(p); } public NeighborIterRange(@ByVal NeighborIter begin_iterator, @ByVal NeighborIter end_iterator) { super((Pointer)null); allocate(begin_iterator, end_iterator); } private native void allocate(@ByVal NeighborIter begin_iterator, @ByVal NeighborIter end_iterator); public native @ByVal NeighborIter begin(); public native @ByVal NeighborIter end(); } @Name("tensorflow::gtl::iterator_range") @NoOffset public static class NodeIterRange extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public NodeIterRange(Pointer p) { super(p); } public NodeIterRange(@ByVal NodeIter begin_iterator, @ByVal NodeIter end_iterator) { super((Pointer)null); allocate(begin_iterator, end_iterator); } private native void allocate(@ByVal NodeIter begin_iterator, @ByVal NodeIter end_iterator); public native @ByVal NodeIter begin(); public native @ByVal NodeIter end(); } // Convenience function for iterating over sub-ranges. // // This provides a bit of syntactic sugar to make using sub-ranges // in for loops a bit easier. Analogous to std::make_pair(). // namespace gtl // namespace tensorflow // #endif // TENSORFLOW_LIB_GTL_ITERATOR_RANGE_H_ // Parsed from tensorflow/core/framework/function.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_FUNCTION_H_ // #define TENSORFLOW_CORE_FRAMEWORK_FUNCTION_H_ // #include // #include "tensorflow/core/framework/attr_value.pb.h" // #include "tensorflow/core/framework/attr_value_util.h" // #include "tensorflow/core/framework/function.pb.h" // #include "tensorflow/core/framework/node_def_util.h" // #include "tensorflow/core/framework/op.h" // #include "tensorflow/core/framework/selective_registration.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/flatmap.h" // #include "tensorflow/core/lib/hash/hash.h" // #include "tensorflow/core/platform/env.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/mutex.h" // #include "tensorflow/core/platform/protobuf.h" @Namespace("tensorflow") @Opaque public static class CancellationManager extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public CancellationManager() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CancellationManager(Pointer p) { super(p); } } @Namespace("tensorflow") @Opaque public static class Rendezvous extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public Rendezvous() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Rendezvous(Pointer p) { super(p); } } // FunctionDefHelper::Create is a convenient helper to construct a // FunctionDef proto. // E.g., // FunctionDef my_func = FunctionDefHelper::Create( // "my_func_name", // {"x:T", "y:T" /* one string per argument */}, // {"z:T" /* one string per return value */}, // {"T: {float, double}" /* one string per attribute */}, // { // {{"o"}, "Mul", {"x", "y"}, {{"T", "$T"}}} // /* one entry per function node */ // }, // /* Mapping between function returns and function node outputs. 
*/ // {{"z", "o:z"}}); // // For the old Function::Node approach, use FunctionDefHelper::Define() // E.g., // FunctionDef my_func = FunctionDefHelper::Define( // "my_func_name", // {"x:T", "y:T" /* one string per argument */}, // {"z:T" /* one string per return value */}, // {"T: {float, double}" /* one string per attribute */}, // { // {{"z"}, "Mul", {"x", "y"}, {{"T", "$T"}}} // /* one entry per function node */ // }); @Namespace("tensorflow") public static class FunctionDefHelper extends Pointer { static { Loader.load(); } /** Default native constructor. */ public FunctionDefHelper() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public FunctionDefHelper(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionDefHelper(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public FunctionDefHelper position(long position) { return (FunctionDefHelper)super.position(position); } // AttrValueWrapper has copy constructors for the type T so that // it's easy to construct a simple AttrValue proto. // // If T is a string type (const char*, string, or StringPiece), and // it starts with "$", we construct a AttrValue of "placeholder". // // E.g., // std:: x = {"T", "$T"} // is a named attr value placeholder. @NoOffset public static class AttrValueWrapper extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AttrValueWrapper(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public AttrValueWrapper(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AttrValueWrapper position(long position) { return (AttrValueWrapper)super.position(position); } public native @ByRef AttrValue proto(); public native AttrValueWrapper proto(AttrValue proto); public AttrValueWrapper() { super((Pointer)null); allocate(); } private native void allocate(); } // Constructs an AttrValue.func given the "name" and "attrs". public static native @ByVal AttrValueWrapper FunctionRef( @StdString BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice >*") StringAttrPairVector attrs); public static native @ByVal AttrValueWrapper FunctionRef( @StdString String name, @ByVal @Cast("tensorflow::gtl::ArraySlice >*") StringAttrPairVector attrs); public static native @ByVal AttrValueWrapper FunctionRef(@StdString BytePointer name); public static native @ByVal AttrValueWrapper FunctionRef(@StdString String name); // Node is used to construct FunctionDef.Node using initialization // lists. E.g., // Node n = {{"z"}, "Mul", {"x", "y"}, {{"T", "$T"}}}; // z = x * y public static class Node extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Node() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Node(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Node(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Node position(long position) { return (Node)super.position(position); } // When constructing a NodeDef, the first entry in ret is used as // the node name, the remaining values are ignored. public native @ByRef StringVector ret(); public native Node ret(StringVector ret); public native @StdString BytePointer op(); public native Node op(BytePointer op); public native @ByRef StringVector arg(); public native Node arg(StringVector arg); public native @ByRef @Cast("std::vector<std::pair<tensorflow::string,tensorflow::FunctionDefHelper::AttrValueWrapper> >*") StringAttrPairVector attr(); public native Node attr(StringAttrPairVector attr); public native @ByRef StringVector dep(); public native Node dep(StringVector dep); public native @ByVal NodeDef ToNodeDef(); } // The Create() function uses the new NodeDef field. `ret_def` // holds a mapping from the function output names from `out_def` to // the node outputs from `node_def`. public static native @ByVal FunctionDef Create(@StdString BytePointer function_name, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector in_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector out_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector attr_def, @ArraySlice Node node_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<std::pair<tensorflow::string,tensorflow::string> >*") StringStringPairVector ret_def); public static native @ByVal FunctionDef Create(@StdString String function_name, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector in_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector out_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector attr_def, @ArraySlice Node node_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<std::pair<tensorflow::string,tensorflow::string> >*") StringStringPairVector ret_def); // The two Define() functions use the old FunctionDef::Node field. // TODO(josh11b): Get rid of these and transition to the one above. public static native @ByVal FunctionDef Define(@StdString BytePointer function_name, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector arg_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector ret_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector attr_def, @ArraySlice Node node_def); public static native @ByVal FunctionDef Define(@StdString String function_name, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector arg_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector ret_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector attr_def, @ArraySlice Node node_def); // Defines an anonymous function. I.e., its name is not relevant. public static native @ByVal FunctionDef Define(@ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector arg_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector ret_def, @ByVal @Cast("tensorflow::gtl::ArraySlice<tensorflow::string>*") StringVector attr_def, @ArraySlice Node node_def); // Helpers to construct a constant scalar. } // Instantiate a function. // // "fdef" encodes a TF function with some attrs in fdef.signature.attr // containing placeholders. InstantiateFunction binds these // placeholders and produces an instantiated function encoded in // "result.gdef". The value to substitute a placeholder is given by // "attr_values", which is a map from a placeholder name to an attr // value. // // InstantiateFunction calls "get_function" to find signatures of other // functions and primitive ops. 
// GetFunctionSignature(func name, opdef) returns OK if the func name is found // and opdef is filled with a pointer to the corresponding signature // (an OpDef proto). Otherwise, returns an error. @Namespace("tensorflow") public static class InstantiationResult extends Pointer { static { Loader.load(); } /** Default native constructor. */ public InstantiationResult() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public InstantiationResult(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstantiationResult(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public InstantiationResult position(long position) { return (InstantiationResult)super.position(position); } public native @ByRef DataTypeVector arg_types(); public native InstantiationResult arg_types(DataTypeVector arg_types); public native @ByRef DataTypeVector ret_types(); public native InstantiationResult ret_types(DataTypeVector ret_types); public native @StdVector NodeDef nodes(); public native InstantiationResult nodes(NodeDef nodes); } @Namespace("tensorflow") public static native @ByVal Status InstantiateFunction(@Const @ByRef FunctionDef fdef, @ByVal AttrSlice attr_values, @ByVal @Cast("tensorflow::GetFunctionSignature*") Pointer get_function, InstantiationResult result); // Returns a debug string for a function definition. // // The returned text is multiple-line. It is intended to be // human-readable rather than being friendly to parsers. It is _NOT_ // intended to be the canonical string representation of "func_def". // Particularly, it may not include all information presented in // "func_def" (e.g., comments, description of the function arguments, // etc.) @Namespace("tensorflow") public static native @StdString BytePointer DebugString(@Const @ByRef FunctionDef func_def); @Namespace("tensorflow") public static native @StdString BytePointer DebugString(@Const @ByRef GraphDef instantiated_func_def); @Namespace("tensorflow") public static native @StdString BytePointer DebugString(@ArraySlice NodeDef instantiated_func_nodes); // Returns a debug string for a top level graph (the main program and // its supporting functions defined in its library). @Namespace("tensorflow") public static native @StdString BytePointer DebugStringWhole(@Const @ByRef GraphDef gdef); // Returns true if f1 == f2. Compares all fields, including descriptions. Order // of NodeDefs doesn't matter. @Namespace("tensorflow") public static native @Cast("bool") boolean FunctionDefsEqual(@Const @ByRef FunctionDef f1, @Const @ByRef FunctionDef f2); // Return a hash of `fdef` that is consistent with the FunctionDefsEqual method. // In other words, if two fdefs compare equal, their hash values will be the // same. @Namespace("tensorflow") public static native @Cast("tensorflow::uint64") long FunctionDefHash(@Const @ByRef FunctionDef fdef); @Namespace("tensorflow") public static class CallFrameInterface extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CallFrameInterface(Pointer p) { super(p); } public native @Cast("size_t") long num_args(); public native @Cast("size_t") long num_retvals(); public native @ByVal Status GetArg(int index, Tensor val); public native @ByVal Status SetRetval(int index, @Const @ByRef Tensor val); } // Represents a function call frame. 
I.e., the data structure used to // pass arguments to a function and retrieve its results. // // Runtime must arrange accesses to one FunctionCallFrame s.t. // 1. SetArgs() happens before any GetArg(); // 2. GetRetvals happens after all SetRetval(); @Namespace("tensorflow") @NoOffset public static class FunctionCallFrame extends CallFrameInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionCallFrame(Pointer p) { super(p); } public FunctionCallFrame(@ByVal @Cast("tensorflow::DataTypeSlice*") DataTypeVector arg_types, @ByVal @Cast("tensorflow::DataTypeSlice*") DataTypeVector ret_types) { super((Pointer)null); allocate(arg_types, ret_types); } private native void allocate(@ByVal @Cast("tensorflow::DataTypeSlice*") DataTypeVector arg_types, @ByVal @Cast("tensorflow::DataTypeSlice*") DataTypeVector ret_types); // Caller methods. public native @ByVal Status SetArgs(@ByVal TensorVector args); public native @ByVal Status GetRetvals(TensorVector rets); // Moves the return values from the frame to rets. If allow_dead_tensors is // false it will fail if any of the retvals do not have a value. public native @ByVal Status ConsumeRetvals(TensorVector rets, @Cast("bool") boolean allow_dead_tensors); public native @Cast("size_t") long num_args(); public native @Cast("size_t") long num_retvals(); // Callee methods. public native @ByVal Status GetArg(int index, Tensor val); public native @ByVal Status SetRetval(int index, @Const @ByRef Tensor val); } // Helper to maintain a map between function names in a given // FunctionDefLibrary and function definitions. // // This class is thread-safe. @Namespace("tensorflow") @NoOffset public static class FunctionLibraryDefinition extends OpRegistryInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionLibraryDefinition(Pointer p) { super(p); } // Note: This constructor grabs `lib_def`'s lock in shared mode. public FunctionLibraryDefinition(@Const @ByRef FunctionLibraryDefinition lib_def) { super((Pointer)null); allocate(lib_def); } private native void allocate(@Const @ByRef FunctionLibraryDefinition lib_def); public FunctionLibraryDefinition(@Const OpRegistryInterface default_registry, @Const @ByRef FunctionDefLibrary lib_def) { super((Pointer)null); allocate(default_registry, lib_def); } private native void allocate(@Const OpRegistryInterface default_registry, @Const @ByRef FunctionDefLibrary lib_def); // Returns True if the library contains `func`, False otherwise. public native @Cast("bool") boolean Contains(@StdString BytePointer func); public native @Cast("bool") boolean Contains(@StdString String func); // Returns nullptr if "func" is not defined in "lib_def". Otherwise, // returns its definition proto. // // NB: This function returns a borrowed pointer, which can be invalidated by a // subsequent call to `ReplaceFunction()` with the given name. public native @Const FunctionDef Find(@StdString BytePointer func); public native @Const FunctionDef Find(@StdString String func); // Adds function definition 'fdef' to this function library. // Returns status 'ok' on success, or error otherwise. This is a no-op if // 'fdef' already exists in this function library. // If 'fdef' is successfully added to the library, it will be accessible // from 'LookUp' and included in the proto returned by 'ToProto'. // This operation is atomic. 
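// E.g., adding a definition and then retrieving it. A hypothetical usage
// sketch: `lib_def` is assumed to be an already-constructed
// FunctionLibraryDefinition and `my_func` a valid FunctionDef whose
// signature name is "MyFunc":
//
//   Status s = lib_def.AddFunctionDef(my_func);
//   if (s.ok() && lib_def.Contains("MyFunc")) {
//     FunctionDef found = lib_def.Find("MyFunc"); // borrowed; owned by lib_def
//   }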
public native @ByVal Status AddFunctionDef(@Const @ByRef FunctionDef fdef); // Adds gradient definition 'grad' to this function library. // This is a no-op if 'grad' already exists in this function library. // If 'grad' is successfully added, it will be accessible via 'FindGradient' // and included in the proto returned by 'ToProto'. // This operation is atomic. public native @ByVal Status AddGradientDef(@Const @ByRef GradientDef grad); // Replaces the function corresponding to `func` with `fdef`. Returns // a non-OK status if "func" was not found in the library, OK otherwise. public native @ByVal Status ReplaceFunction(@StdString BytePointer func, @Const @ByRef FunctionDef fdef); public native @ByVal Status ReplaceFunction(@StdString String func, @Const @ByRef FunctionDef fdef); // Replaces the gradient corresponding to `grad.function_name()`. Returns // a non-OK status if "grad.function_name()" was not found in the library, OK // otherwise. public native @ByVal Status ReplaceGradient(@Const @ByRef GradientDef grad); // Adds the functions and gradients in 'other' to this function library. // Duplicate functions and gradients are ignored. // This operation is atomic. public native @ByVal Status AddLibrary(@Const @ByRef FunctionLibraryDefinition other); // Adds the functions and gradients in 'lib_def' to this function library. // Duplicate functions and gradients are ignored. // This operation is atomic. public native @ByVal Status AddLibrary(@Const @ByRef FunctionDefLibrary lib_def); // If the gradient function for 'func' is specified explicitly in // the library, returns the gradient function name. Otherwise, // returns an empty string. public native @StdString BytePointer FindGradient(@StdString BytePointer func); public native @StdString String FindGradient(@StdString String func); // OpRegistryInterface method. Useful for constructing a Graph. // // If "op" is defined in the library, returns its signature. // Otherwise, assume "op" is a primitive op and returns its op // signature and shape inference function. // // NB: This function outputs a borrowed pointer, which can be invalidated by a // subsequent call to `ReplaceFunction()` with the given name. public native @ByVal Status LookUp(@StdString BytePointer op_type_name, @Cast("const tensorflow::OpRegistrationData**") PointerPointer op_reg_data); public native @ByVal Status LookUp(@StdString BytePointer op_type_name, @Const @ByPtrPtr OpRegistrationData op_reg_data); public native @ByVal Status LookUp(@StdString String op_type_name, @Const @ByPtrPtr OpRegistrationData op_reg_data); // Generates a new function name with the specified prefix that is unique // across this library. public native @StdString BytePointer UniqueFunctionName(@StringPiece BytePointer prefix); public native @StdString String UniqueFunctionName(@StringPiece String prefix); // Ops created for function arguments bear the name given by `kArgOp`; those // created for return values bear the name given by `kRetOp`. @MemberGetter public static native @Cast("const char*") BytePointer kArgOp(); @MemberGetter public static native @Cast("const char*") BytePointer kRetOp(); @MemberGetter public static native @Cast("const char*") BytePointer kGradientOp(); @MemberGetter public static native @Cast("const char*") BytePointer kFuncAttr(); // Given a node def 'ndef', inspects attributes of the callee // function to derive the attribute 'value' for 'attr'. Returns OK // iff the attribute is given by the function's definition. 
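// E.g., the name-based queries declared above can be combined as follows
// (a hypothetical sketch; `lib_def` is assumed to be a populated
// FunctionLibraryDefinition):
//
//   String grad = lib_def.FindGradient("MyFunc");        // "" if no gradient registered
//   String fresh = lib_def.UniqueFunctionName("MyFunc_"); // an unused name with that prefix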
// TODO(irving): Remove; keep only the const Node& version. // Given a node, inspects attributes of the callee function to derive the // attribute 'value' for 'attr'. Returns OK iff the attribute is given by the // function's definition. // Returns a proto representation of the state of this function library. public native @ByVal FunctionDefLibrary ToProto(); public native @Cast("size_t") long num_functions(); public native @Const OpRegistryInterface default_registry(); } // Forward declare. Defined in common_runtime/function.h @Namespace("tensorflow") @Opaque public static class FunctionBody extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public FunctionBody() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionBody(Pointer p) { super(p); } } // Forward declare. Defined in common_runtime/device.h // Forward declare. Defined in common_runtime/device_mgr.h @Namespace("tensorflow") public static class FunctionLibraryRuntime extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FunctionLibraryRuntime(Pointer p) { super(p); } // Instantiate a function with the given "attrs". // // Returns OK and fills in "handle" if the instantiation succeeds. // Otherwise returns an error and "handle" is undefined. public static class InstantiateOptions extends Pointer { static { Loader.load(); } /** Default native constructor. */ public InstantiateOptions() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public InstantiateOptions(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstantiateOptions(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public InstantiateOptions position(long position) { return (InstantiateOptions)super.position(position); } // The canonical device name of the device on which the function // should be instantiated. If empty, the function will be // instantiated on the local device. public native @StdString BytePointer target(); public native InstantiateOptions target(BytePointer target); // This interface is EXPERIMENTAL and subject to change. // // If non-null, the runtime will use `overlay_lib` to resolve // function(s) named in `function_name` and `attrs`. Otherwise, // the runtime will use its internal library. // NOTE(mrry): If provided, all functions defined in `overlay_lib` // must be self-contained, and cannot refer to functions defined // in other libraries. // TODO(mrry): Provide a mechanism for sharing core functions // between a set of libraries (e.g. by allowing a // `FunctionLibraryDefinition` to store an `outer_scope` pointer // and implementing name resolution across libraries). @MemberGetter public native @Const FunctionLibraryDefinition overlay_lib(); // This interface is EXPERIMENTAL and subject to change. // // If non-empty, the runtime will use `state_handle` to identify // cached state related to the instantiated function. Two functions // of the same name and attrs, instantiated with the same // `state_handle` will have the same handle and share the same // state (in stateful kernels); and two functions with different // values for `state_handle` will have independent state. 
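// E.g., a hypothetical configuration sketch using the setters of this
// class (the device name below is illustrative only):
//
//   FunctionLibraryRuntime.InstantiateOptions i_opts =
//       new FunctionLibraryRuntime.InstantiateOptions();
//   i_opts.target(new BytePointer("/job:localhost/replica:0/task:0/device:CPU:0"));
//   i_opts.state_handle(new BytePointer("shared_state"));
//   // Instantiations of the same function and attrs that pass the same
//   // state_handle share stateful kernel state.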
public native @StdString BytePointer state_handle(); public native InstantiateOptions state_handle(BytePointer state_handle); // This interface is EXPERIMENTAL and subject to change. // // Instantiates the function using an executor of the given type. If empty, // the default TensorFlow executor will be used. public native @StdString BytePointer executor_type(); public native InstantiateOptions executor_type(BytePointer executor_type); // If true, the runtime will attempt to create kernels for the function at // instantiation time, rather than on the first run. This can be used to // surface errors earlier. public native @Cast("bool") boolean create_kernels_eagerly(); public native InstantiateOptions create_kernels_eagerly(boolean create_kernels_eagerly); } public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Const @ByRef InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongPointer handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Const @ByRef InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongBuffer handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Const @ByRef InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") long... handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Const @ByRef InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongPointer handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Const @ByRef InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongBuffer handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Const @ByRef InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") long... handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongPointer handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongBuffer handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") long... handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongPointer handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongBuffer handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") long... handle); // Releases state associated with the handle. public native @ByVal Status ReleaseHandle(@Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); // Returns the function body for the instantiated function given its // handle 'h'. Returns nullptr if "h" is not found. // // *this keeps the ownership of the returned object, which remains alive // as long as *this. 
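// E.g., instantiation yields an opaque handle that is later passed to
// GetFunctionBody() and Run(). A hypothetical sketch; `flr` (a
// FunctionLibraryRuntime), `attrs` (an AttrSlice) and `i_opts` (see the
// sketch above) are assumed to exist:
//
//   long[] handle = new long[1];
//   Status s = flr.Instantiate("MyFunc", attrs, i_opts, handle);
//   if (s.ok()) {
//     FunctionBody body = flr.GetFunctionBody(handle[0]); // owned by flr
//   }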
public native @Const FunctionBody GetFunctionBody(@Cast("tensorflow::FunctionLibraryRuntime::Handle") long h); // Asynchronously invokes the instantiated function identified by // "handle". // // If function execution succeeds, "done" is called with OK and // "*rets" is filled with the function's return values. Otherwise, // "done" is called with an error status. // // Does not take ownership of "rets". // In the cross-process scenario, runner isn't used for making the Async // RPC calls. public static class Options extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Options() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Options(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Options(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Options position(long position) { return (Options)super.position(position); } // The id of the step that is calling this function. public native @Cast("tensorflow::int64") long step_id(); public native Options step_id(long step_id); public native Rendezvous rendezvous(); public native Options rendezvous(Rendezvous rendezvous); public native CancellationManager cancellation_manager(); public native Options cancellation_manager(CancellationManager cancellation_manager); public native CollectiveExecutor collective_executor(); public native Options collective_executor(CollectiveExecutor collective_executor); public native ScopedStepContainer step_container(); public native Options step_container(ScopedStepContainer step_container); public native StepStatsCollectorInterface stats_collector(); public native Options stats_collector(StepStatsCollectorInterface stats_collector); public native @Cast("std::function<void(std::function<void()>)>*") Pointer runner(); public native Options runner(Pointer runner); // Parameters for remote function execution. public native @Cast("bool") boolean remote_execution(); public native Options remote_execution(boolean remote_execution); public native @StdString BytePointer source_device(); public native Options source_device(BytePointer source_device); // Fully specified device name. // Allocator attributes specifying where the args are / rets should be put. // These should either be {} or match the length of args / retvals. If {}, // the default allocator attributes will be assumed for all args / retvals. public native @StdVector AllocatorAttributes args_alloc_attrs(); public native Options args_alloc_attrs(AllocatorAttributes args_alloc_attrs); public native @StdVector AllocatorAttributes rets_alloc_attrs(); public native Options rets_alloc_attrs(AllocatorAttributes rets_alloc_attrs); // If true, we create a new IntraProcessRendezvous, else use the existing // one. public native @Cast("bool") boolean create_rendezvous(); public native Options create_rendezvous(boolean create_rendezvous); // If true, allow returning dead tensors. 
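// E.g., a hypothetical per-invocation setup using the setters declared in
// this class:
//
//   FunctionLibraryRuntime.Options run_opts = new FunctionLibraryRuntime.Options();
//   run_opts.step_id(1);                 // id of the calling step
//   run_opts.create_rendezvous(true);    // let the runtime create one
//   run_opts.allow_dead_tensors(false);  // fail if any retval is dead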
public native @Cast("bool") boolean allow_dead_tensors(); public native Options allow_dead_tensors(boolean allow_dead_tensors); } public native void Run(@Const @ByRef Options opts, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle, @ByVal TensorVector args, TensorVector rets, @ByVal @Cast("tensorflow::FunctionLibraryRuntime::DoneCallback*") Pointer done); public native void Run(@Const @ByRef Options opts, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle, CallFrameInterface call_frame, @ByVal @Cast("tensorflow::FunctionLibraryRuntime::DoneCallback*") Pointer done); // Creates a "kernel" for the given node def "ndef". // // If succeeds, returns OK and the caller takes the ownership of the // returned "*kernel". Otherwise, returns an error. public native @ByVal Status CreateKernel(@Const @ByRef NodeDef ndef, @Cast("tensorflow::OpKernel**") PointerPointer kernel); public native @ByVal Status CreateKernel(@Const @ByRef NodeDef ndef, @ByPtrPtr OpKernel kernel); // Returns true iff the function named `function_name` is stateful. // NOTE(mrry): This method assumes that the runtime is associated with a // default function library, and looks up `function_name` in that library. // It does not support overlay libraries. public native @Cast("bool") boolean IsStateful(@StdString BytePointer function_name); public native @Cast("bool") boolean IsStateful(@StdString String function_name); // Returns the device on which the function executes. public native Device device(); // Get the DeviceMgr from which the device was obtained. public native @Const DeviceMgr device_mgr(); // Returns the function library definition that backs this runtime. // NOTE(mrry): The returned library definition is the default function library // for this runtime. The runtime may instantiate functions from separate // overlay libraries, which are not returned by this function. public native @Const FunctionLibraryDefinition GetFunctionLibraryDefinition(); // Returns the environment on which the function executes. public native Env env(); // Returns a debug string showing the definition of the function of // 'handle'. public native @StdString BytePointer DebugString(@Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); // Returns the graph version number. public native int graph_def_version(); public native @ByVal Status Clone(@UniquePtr FunctionLibraryDefinition out_lib_def, @UniquePtr ProcessFunctionLibraryRuntime out_pflr, @Cast("tensorflow::FunctionLibraryRuntime**") PointerPointer out_flr); public native @ByVal Status Clone(@UniquePtr FunctionLibraryDefinition out_lib_def, @UniquePtr ProcessFunctionLibraryRuntime out_pflr, @ByPtrPtr FunctionLibraryRuntime out_flr); } // Returns a canonicalized string for the instantiation of the // function of the given "name", attributes "attrs", and "options". // // The returned string is guaranteed to be stable within one address // space. But it may be change as the implementation // evolves. Therefore, it should not be persisted or compared across // address spaces. 
@Namespace("tensorflow") public static native @StdString BytePointer Canonicalize(@StdString BytePointer funcname, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options); @Namespace("tensorflow") public static native @StdString String Canonicalize(@StdString String funcname, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options); @Namespace("tensorflow") public static native @StdString BytePointer Canonicalize(@StdString BytePointer funcname, @ByVal AttrSlice attrs); @Namespace("tensorflow") public static native @StdString String Canonicalize(@StdString String funcname, @ByVal AttrSlice attrs); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::FunctionLibraryRuntime::Handle") long kInvalidHandle(); @Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::FunctionLibraryRuntime::LocalHandle") long kInvalidLocalHandle(); // Used to instantiate and run functions in a distributed system. @Namespace("tensorflow") public static class DistributedFunctionLibraryRuntime extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DistributedFunctionLibraryRuntime(Pointer p) { super(p); } // The _target attr in attrs determines where the function is instantiated. public native @ByVal Status Instantiate( @StdString BytePointer function_name, @Const @ByRef FunctionLibraryDefinition lib_def, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle*") LongPointer handle); public native @ByVal Status Instantiate( @StdString String function_name, @Const @ByRef FunctionLibraryDefinition lib_def, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle*") LongBuffer handle); public native @ByVal Status Instantiate( @StdString BytePointer function_name, @Const @ByRef FunctionLibraryDefinition lib_def, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle*") long... handle); public native @ByVal Status Instantiate( @StdString String function_name, @Const @ByRef FunctionLibraryDefinition lib_def, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle*") LongPointer handle); public native @ByVal Status Instantiate( @StdString BytePointer function_name, @Const @ByRef FunctionLibraryDefinition lib_def, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle*") LongBuffer handle); public native @ByVal Status Instantiate( @StdString String function_name, @Const @ByRef FunctionLibraryDefinition lib_def, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle*") long... handle); // opts.runner isn't used for execution. public native void Run(@Const @ByRef FunctionLibraryRuntime.Options opts, @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle") long handle, @ByVal TensorVector args, TensorVector rets, @ByVal @Cast("tensorflow::FunctionLibraryRuntime::DoneCallback*") Pointer done); } // Extracts the actual type from "attr_values" based on its definition // "arg_def". 
// // If "arg_def" is a N*T type, *is_type_list is set to false, and // *dtypes is set to be a vector of size N and each element is T. // // If "arg_def" is a list(type), *is_type_list is set to true, and // *dtypes is set to be a vector of types specified in attrs for // arg_def. // // Otherwise (arg_def is a simple type T), *is_type_list is set to // false, and *dtypes is set to a single element vector, whose only // element is T. @Namespace("tensorflow") public static native @ByVal Status ArgNumType(@ByVal AttrSlice attrs, @Cast("const tensorflow::OpDef::ArgDef*") @ByRef OpDef_ArgDef arg_def, @Cast("bool*") BoolPointer is_type_list, DataTypeVector dtypes); @Namespace("tensorflow") public static native @ByVal Status ArgNumType(@ByVal AttrSlice attrs, @Cast("const tensorflow::OpDef::ArgDef*") @ByRef OpDef_ArgDef arg_def, @Cast("bool*") boolean[] is_type_list, DataTypeVector dtypes); // To register a gradient function for a builtin op, one should use // REGISTER_OP_GRADIENT(, ); // // Typically, the c++ grad factory is a plan function that can be // converted into ::tensorflow::gradient::Creator, which is // std::function. // // A ::tensorflow::gradient::Creator should populate in FunctionDef* with a // definition of a brain function which compute the gradient for the // when the is instantiated with the given attrs. // // E.g., // // Status MatMulGrad(const AttrSlice& attrs, FunctionDef* g) { // bool transpose_a; // TF_RETURN_IF_ERROR(attrs.Get("transpose_a", &transpose_a)); // bool transpose_b; // TF_RETURN_IF_ERROR(attrs.Get("transpose_b", &transpose_b)); // DataType dtype; // TF_RETURN_IF_ERROR(attrs.Get("dtype", &dtype)); // if (!transpose_a && !transpose_b) { // *g = FunctionDefHelper::Define( // "MatMulGrad", // {"x:T ", "y:T", "dz:T"}, // Inputs to this function // {"dx:T", "dy:T"}, // Outputs from this function // {"T: {float, double}"}, // Attributes needed by this function // { // {{"x_t"}, "Transpose", {"x"}, {{"T", "$T"}}}, // {{"y_t"}, "Transpose", {"y"}, {{"T", "$T"}}}, // {{"dx"}, "MatMul", {"dz", "y_t"}, {{"T", "$T"}}}, // {{"dy"}, "MatMul", {"x_", "dz"}, {{"T", "$T"}}}, // }); // } else { // ... ... // } // return Status::OK(); // } // // NOTE: $T is substituted with the type variable "T" when the // gradient function MatMul is instantiated. // // TODO(zhifengc): Better documentation somewhere. // Macros to define a gradient function factory for a primitive // operation. // #define REGISTER_OP_GRADIENT(name, fn) // REGISTER_OP_GRADIENT_UNIQ_HELPER(__COUNTER__, name, fn) // #define REGISTER_OP_NO_GRADIENT(name) // REGISTER_OP_GRADIENT_UNIQ_HELPER(__COUNTER__, name, nullptr) // #define REGISTER_OP_GRADIENT_UNIQ_HELPER(ctr, name, fn) // REGISTER_OP_GRADIENT_UNIQ(ctr, name, fn) // #define REGISTER_OP_GRADIENT_UNIQ(ctr, name, fn) // static bool unused_grad_##ctr TF_ATTRIBUTE_UNUSED = // SHOULD_REGISTER_OP_GRADIENT && // ::tensorflow::gradient::RegisterOp(name, fn) // Register a gradient creator for the "op". @Namespace("tensorflow::gradient") public static native @Cast("bool") boolean RegisterOp(@StdString BytePointer op, @ByVal @Cast("tensorflow::gradient::Creator*") Pointer func); @Namespace("tensorflow::gradient") public static native @Cast("bool") boolean RegisterOp(@StdString String op, @ByVal @Cast("tensorflow::gradient::Creator*") Pointer func); // Returns OK the gradient creator for the "op" is found (may be // nullptr if REGISTER_OP_NO_GRADIENT is used. 
@Namespace("tensorflow::gradient") public static native @ByVal Status GetOpGradientCreator(@StdString BytePointer op, @Cast("tensorflow::gradient::Creator*") Pointer creator); @Namespace("tensorflow::gradient") public static native @ByVal Status GetOpGradientCreator(@StdString String op, @Cast("tensorflow::gradient::Creator*") Pointer creator); // namespace gradient // Declare explicit instantiations of GetAttr // #define GET_ATTR(T) // extern template Status FunctionLibraryDefinition::GetAttr( // const Node&, const string&, T*) const; // extern template Status FunctionLibraryDefinition::GetAttr( // const NodeDef&, const string&, T*) const; // #undef GET_ATTR // end namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_FUNCTION_H_ // Parsed from tensorflow/core/util/device_name_utils.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_UTIL_DEVICE_NAME_UTILS_H_ // #define TENSORFLOW_CORE_UTIL_DEVICE_NAME_UTILS_H_ // #include // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // In TensorFlow a device name is a string of the following form: // /job:/replica:/task:/device:: // // is a short identifier conforming to the regexp // [a-zA-Z][_a-zA-Z]* // is a supported device type (e.g. 'cpu' or 'gpu') // , , are small non-negative integers and are // densely allocated (except in tests). // // For some purposes, we also allow device patterns, which can specify // some or none of the specific fields above, with missing components, // or ":*" indicating "any value allowed for that component. // // For example: // "/job:param_server" - Consider any devices in the "param_server" job // "/device:cpu:*" - Consider any cpu devices in any job/task/replica // "/job:*/replica:*/task:*/device:cpu:*" - Consider any cpu devices in any // job/task/replica // "/job:w/replica:0/task:0/device:gpu:*" - Consider any gpu devices in // replica 0, task 0, of job "w" @Namespace("tensorflow") public static class DeviceNameUtils extends Pointer { static { Loader.load(); } /** Default native constructor. */ public DeviceNameUtils() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public DeviceNameUtils(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeviceNameUtils(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public DeviceNameUtils position(long position) { return (DeviceNameUtils)super.position(position); } // Returns a fully qualified device name given the parameters. 
public static native @StdString BytePointer FullName(@StdString BytePointer job, int replica, int task, @StdString BytePointer type, int id); public static native @StdString String FullName(@StdString String job, int replica, int task, @StdString String type, int id); public static class ParsedName extends Pointer { static { Loader.load(); } /** Default native constructor. */ public ParsedName() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public ParsedName(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParsedName(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public ParsedName position(long position) { return (ParsedName)super.position(position); } public native void Clear(); public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ParsedName other); public native @Cast("bool") boolean has_job(); public native ParsedName has_job(boolean has_job); public native @StdString BytePointer job(); public native ParsedName job(BytePointer job); public native @Cast("bool") boolean has_replica(); public native ParsedName has_replica(boolean has_replica); public native int replica(); public native ParsedName replica(int replica); public native @Cast("bool") boolean has_task(); public native ParsedName has_task(boolean has_task); public native int task(); public native ParsedName task(int task); public native @Cast("bool") boolean has_type(); public native ParsedName has_type(boolean has_type); public native @StdString BytePointer type(); public native ParsedName type(BytePointer type); public native @Cast("bool") boolean has_id(); public native ParsedName has_id(boolean has_id); public native int id(); public native ParsedName id(int id); } // Parses "fullname" into "*parsed". Returns true iff succeeds. public static native @Cast("bool") boolean ParseFullName(@StringPiece BytePointer fullname, ParsedName parsed); public static native @Cast("bool") boolean ParseFullName(@StringPiece String fullname, ParsedName parsed); // Canonicalizes "fullname" into "*canonical_name". Uses a fully specified // basename to fill in fields that are missing. Accepts both legacy, newer // and local versions of the device spec. Returns the newer version of the // device spec. If we were unable to interpret / parse "fullname" returns // an error and *canonical_name is set to "". public static native @ByVal Status CanonicalizeDeviceName(@StringPiece BytePointer fullname, @StringPiece BytePointer basename, @StdString @Cast({"char*", "std::string*"}) BytePointer canonical_name); public static native @ByVal Status CanonicalizeDeviceName(@StringPiece String fullname, @StringPiece String basename, @StdString @Cast({"char*", "std::string*"}) BytePointer canonical_name); // Returns true if "name" specifies any non-trivial constraint on the device. public static native @Cast("bool") boolean HasSomeDetails(@Const @ByRef ParsedName name); // Returns true if more_specific is a specification of // less_specific, i.e. everywhere that less-specific has a // non-wildcard component value, more_specific has the same value // for that component. public static native @Cast("bool") boolean IsSpecification(@Const @ByRef ParsedName less_specific, @Const @ByRef ParsedName more_specific); // Like IsSpecification, but the second argument "name" must have a // non-wildcard value for all of its components. 
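// E.g., a hypothetical sketch combining ParseFullName() with the
// specification checks (":*" leaves that component as a wildcard):
//
//   DeviceNameUtils.ParsedName pattern = new DeviceNameUtils.ParsedName();
//   DeviceNameUtils.ParsedName name = new DeviceNameUtils.ParsedName();
//   DeviceNameUtils.ParseFullName("/job:w/replica:0/task:0/device:GPU:*", pattern);
//   DeviceNameUtils.ParseFullName("/job:w/replica:0/task:0/device:GPU:2", name);
//   boolean matches = DeviceNameUtils.IsCompleteSpecification(pattern, name);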
public static native @Cast("bool") boolean IsCompleteSpecification(@Const @ByRef ParsedName pattern, @Const @ByRef ParsedName name); // True iff there exists any possible complete device name that is // a specification of both "a" and "b". public static native @Cast("bool") boolean AreCompatibleDevNames(@Const @ByRef ParsedName a, @Const @ByRef ParsedName b); // Merges the device specifications in "*target" and "other", and // stores the result in "*target". Returns OK if "*target" and // "other" are compatible, otherwise returns an error. public static native @ByVal Status MergeDevNames(ParsedName target, @Const @ByRef ParsedName other); public static native @ByVal Status MergeDevNames(ParsedName target, @Const @ByRef ParsedName other, @Cast("bool") boolean allow_soft_placement); // Returns true iff devices identified by 'src' and 'dst' are in the // same address space. public static native @Cast("bool") boolean IsSameAddressSpace(@StringPiece BytePointer src, @StringPiece BytePointer dst); public static native @Cast("bool") boolean IsSameAddressSpace(@StringPiece String src, @StringPiece String dst); public static native @Cast("bool") boolean IsSameAddressSpace(@Const @ByRef ParsedName src, @Const @ByRef ParsedName dst); // Returns the local device given its "type" and "id". public static native @StdString BytePointer LocalName(@StringPiece BytePointer type, int id); public static native @StdString String LocalName(@StringPiece String type, int id); // Returns a short local device name (cpu:0, gpu:1, etc) based on // the given fullname. public static native @StdString BytePointer LocalName(@StringPiece BytePointer fullname); public static native @StdString String LocalName(@StringPiece String fullname); // If "name" is a valid local device name (cpu:0, gpu:1, etc.), // fills in parsed.type and parsed.id accordingly. Returns true iff // succeeds. public static native @Cast("bool") boolean ParseLocalName(@StringPiece BytePointer name, ParsedName parsed); public static native @Cast("bool") boolean ParseLocalName(@StringPiece String name, ParsedName parsed); // Splits a fully-qualified device name into a task identifier and a // relative device identifier. It first parses "name" using // ParseFullName(), then assigns *task with everything except for // the local device component, and assigns the relative device // component into *device. This function will still return true if // the task component is empty, but it requires the relative device // component to be fully specified. public static native @Cast("bool") boolean SplitDeviceName(@StringPiece BytePointer name, @StdString @Cast({"char*", "std::string*"}) BytePointer task, @StdString @Cast({"char*", "std::string*"}) BytePointer device); public static native @Cast("bool") boolean SplitDeviceName(@StringPiece String name, @StdString @Cast({"char*", "std::string*"}) BytePointer task, @StdString @Cast({"char*", "std::string*"}) BytePointer device); public static native @StdString BytePointer ParsedNameToString(@Const @ByRef ParsedName pn); // Returns canonical and legacy full names for the given parsed // device name 'pn'. The returned string names are often useful to // look up devices from a mapping. public static native @ByVal StringVector GetNamesForDeviceMappings(@Const @ByRef ParsedName pn); // Returns canonical and legacy local names for the given parsed device name // 'pn'. The returned string names are often useful to look up devices from a // mapping. 
public static native @ByVal StringVector GetLocalNamesForDeviceMappings( @Const @ByRef ParsedName pn); } // namespace tensorflow // #endif // TENSORFLOW_CORE_UTIL_DEVICE_NAME_UTILS_H_ // Parsed from tensorflow/core/framework/device_base.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_DEVICE_BASE_H_ // #define TENSORFLOW_CORE_FRAMEWORK_DEVICE_BASE_H_ // #include // #include // #include // #include "absl/base/macros.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/lib/core/errors.h" // #include "tensorflow/core/lib/core/refcount.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/platform/logging.h" // #ifdef TENSORFLOW_USE_SYCL // #endif // end namespace Eigen @Namespace("stream_executor") @Opaque public static class Stream extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public Stream() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Stream(Pointer p) { super(p); } } // namespace stream_executor @Namespace("tensorflow") @Opaque public static class EventMgr extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public EventMgr() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EventMgr(Pointer p) { super(p); } } @Namespace("tensorflow") @Opaque public static class ScopedAllocatorMgr extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public ScopedAllocatorMgr() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScopedAllocatorMgr(Pointer p) { super(p); } } // A wrapper for an Eigen Gpu Device that includes per-op state. The // class is defined even for non-GPU devices since the // OpKernelContext::Params structure wants to fill it in. @Namespace("tensorflow") public static class PerOpGpuDevice extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PerOpGpuDevice(Pointer p) { super(p); } public native @Const @ByRef GpuDevice device(); } // A class that devices can subclass to pass around // Device-specific context to OpKernels. @Namespace("tensorflow") public static class DeviceContext extends Pointer { static { Loader.load(); } /** Default native constructor. */ public DeviceContext() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public DeviceContext(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public DeviceContext(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public DeviceContext position(long position) { return (DeviceContext)super.position(position); } public native Stream stream(); public native void MaintainLifetimeOnStream(@Const Tensor t, Stream stream); // "cpu_tensor" is a tensor on a CPU. Copies "cpu_tensor" into // "device_tensor" which is on a GPU device "device". "device_tensor" // must be allocated to be of the same size as "cpu_tensor". public native void CopyCPUTensorToDevice(@Const Tensor cpu_tensor, Device device, Tensor device_tensor, @ByVal @Cast("tensorflow::StatusCallback*") Pointer done); // "device_tensor" is a tensor on a non-CPU device. Copies // device_tensor into "cpu_tensor". "cpu_tensor" must be allocated // to be of the same size as "device_tensor". public native void CopyDeviceTensorToCPU(@Const Tensor device_tensor, @StringPiece BytePointer tensor_name, Device device, Tensor cpu_tensor, @ByVal @Cast("tensorflow::StatusCallback*") Pointer done); public native void CopyDeviceTensorToCPU(@Const Tensor device_tensor, @StringPiece String tensor_name, Device device, Tensor cpu_tensor, @ByVal @Cast("tensorflow::StatusCallback*") Pointer done); // If possible, wait for all events on *stream to complete then execute func. // A non-OK Status is returned otherwise. The stream argument should be the // one provided by GpuDeviceInfo. This function is not applicable to devices // that don't provide such a value. public native @ByVal Status ThenExecute(Device device, Stream stream, @ByVal Fn func); } // map[i] is the DeviceContext* for the node with id i, if i < map.size(). @Namespace("tensorflow") @NoOffset public static class DeviceBase extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeviceBase(Pointer p) { super(p); } public DeviceBase(Env env) { super((Pointer)null); allocate(env); } private native void allocate(Env env); public native Env env(); // Override this to return true for devices that require an Op's // compute method to save references to the temporary tensors it // allocates until the Op execution completes public native @Cast("bool") boolean RequiresRecordingAccessedTensors(); public static class CpuWorkerThreads extends Pointer { static { Loader.load(); } /** Default native constructor. */ public CpuWorkerThreads() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public CpuWorkerThreads(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CpuWorkerThreads(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public CpuWorkerThreads position(long position) { return (CpuWorkerThreads)super.position(position); } public native int num_threads(); public native CpuWorkerThreads num_threads(int num_threads); public native ThreadPool workers(); public native CpuWorkerThreads workers(ThreadPool workers); } // Does not take ownership. public native void set_tensorflow_cpu_worker_threads(CpuWorkerThreads t); public native @Const CpuWorkerThreads tensorflow_cpu_worker_threads(); // "stream" is used in special circumstances (such as the // constructors of Ops) where there is no available OpKernelContext. 
// "default_context" is used by OpKernelContext whenever a device does not // supply a DeviceContext for an op in FillContextMap (e.g. when only // using a single stream.) // "event_mgr" is used to delay deallocation of temporary GPU buffers. // TODO(pbar) Work out how to move this out of DeviceBase. // GpuDeviceInfo name is an unfortunate legacy, it is used not only by GPUs // but also by TPU devices (to provide default device context). public static class GpuDeviceInfo extends Pointer { static { Loader.load(); } /** Default native constructor. */ public GpuDeviceInfo() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public GpuDeviceInfo(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GpuDeviceInfo(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public GpuDeviceInfo position(long position) { return (GpuDeviceInfo)super.position(position); } // Make sure all the defaults are NULL, so we can spot missing assignments. public native Stream stream(); public native GpuDeviceInfo stream(Stream stream); public native DeviceContext default_context(); public native GpuDeviceInfo default_context(DeviceContext default_context); public native EventMgr event_mgr(); public native GpuDeviceInfo event_mgr(EventMgr event_mgr); public native int gpu_id(); public native GpuDeviceInfo gpu_id(int gpu_id); } // Does not take ownership. public native void set_tensorflow_gpu_device_info(GpuDeviceInfo g); public native @Const GpuDeviceInfo tensorflow_gpu_device_info(); // The preferred thread pool for this device. If it is nullptr, the system // automatically assigns a thread pool for execution. public native ThreadPool tensorflow_device_thread_pool(); // Does not take ownership. public native void set_eigen_cpu_device(ThreadPoolDevice d); // #ifdef TENSORFLOW_USE_SYCL // #endif // Return the Allocator implementation to use based on the allocator // attributes requested. See allocator.h for more details. public native Allocator GetAllocator(@ByVal AllocatorAttributes arg0); // This method is provided for backwards compatibility, and will be removed // in a future release. public native Allocator GetStepAllocator(@ByVal AllocatorAttributes attr, ResourceMgr arg1); // Return an Allocator prepared for use in particular places by graph // optimization public native Allocator GetScopedAllocator(@ByVal AllocatorAttributes attr, @Cast("tensorflow::int64") long step_id); public native ScopedAllocatorMgr GetScopedAllocatorMgr(); public native @Cast("bool") boolean has_eigen_cpu_device(); public native @Const ThreadPoolDevice eigen_cpu_device(); // #ifdef TENSORFLOW_USE_SYCL // #endif // Caller owns the return value. The OpKernelContext calls this even // for devices that do not implement an eigen_gpu_device. Overridden // by GPU devices to return a derived type. public native PerOpGpuDevice MakeGpuDevice(); public native DeviceBase UnderlyingDevice(); // This is overridden by GPU devices to reinitialize the derived // type returned by MakeGpuDevice. public native @ByVal Status ReinitializeGpuDevice(OpKernelContext arg0, PerOpGpuDevice arg1, DeviceContext arg2, Allocator arg3); // Unimplemented by default public native @Const @ByRef DeviceAttributes attributes(); public native @StdString BytePointer name(); // Materializes the given TensorProto into 'tensor' stored in Device // memory. 
Most devices will want to override this. // // TODO(vrv): We should be able to put this function into // OpKernelContext and handle the copies from device memory via send // and receive nodes, instead of requiring that each device handle // the copies here as well as in copy ops. public native @ByVal Status MakeTensorFromProto(@Const @ByRef TensorProto tensor_proto, @Const @ByVal AllocatorAttributes alloc_attrs, Tensor tensor); } // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_DEVICE_BASE_H_ // Parsed from tensorflow/core/common_runtime/device.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // A Device is something that can perform computations as part of a // model. Devices can be local (runs computation on this machine), or // remote (contacts a device local to another machine using an RPC to // do the work). Devices are registered in a DeviceSet, which is also // responsible for the Device <-> id mapping. // // Device names // * Every Device should have a unique name with the format: // /job:___/replica:___/task:___/(gpu|cpu):___ // An example name would be "/job:train/replica:0/task:3/device:GPU:2". // * Task numbers are within the specified replica, so there are as // many "task zeros" as replicas. // #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_H_ // #define TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_H_ // #include // #include // #include "tensorflow/core/framework/allocator.h" // #include "tensorflow/core/framework/control_flow.h" // #include "tensorflow/core/framework/device_attributes.pb_text.h" // #include "tensorflow/core/framework/device_attributes.pb.h" // #include "tensorflow/core/framework/device_base.h" // #include "tensorflow/core/framework/graph.pb.h" // #include "tensorflow/core/framework/op_kernel.h" // #include "tensorflow/core/framework/op_segment.h" // #include "tensorflow/core/framework/resource_mgr.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/graph/graph.h" // #include "tensorflow/core/graph/types.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/platform/macros.h" // #include "tensorflow/core/platform/types.h" // #include "tensorflow/core/util/device_name_utils.h" @Namespace("tensorflow") @NoOffset public static class Device extends DeviceBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Device(Pointer p) { super(p); } // Full name of this device (see top comment). public native @StdString BytePointer name(); // Parsed name of this device public native @Const @ByRef DeviceNameUtils.ParsedName parsed_name(); // Describes what kind of device this is. This is intended to be // human-readable and not computer-parsed, except that two devices // with the same device_type() are expected to perform similarly // (both from a computation and communication perspective). 
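// E.g., a hypothetical identity query against a Device obtained from a
// DeviceMgr (`dev` is assumed to exist):
//
//   String full = dev.name().getString();              // fully qualified name
//   DeviceNameUtils.ParsedName pn = dev.parsed_name(); // parsed components
//   String kind = dev.device_type().getString();       // e.g. "CPU" or "GPU"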
    public native @StdString BytePointer device_type();

    // Returns an aggregation of device attributes.
    public native @Const @ByRef DeviceAttributes attributes();

    // Performs the actual compute function.
    //
    // Subclasses may override this function if they wish to perform
    // some initialization before each compute.
    public native void Compute(OpKernel op_kernel, OpKernelContext context);

    // Asynchronous kernel's compute.
    public native void ComputeAsync(AsyncOpKernel op_kernel, OpKernelContext context,
                                    @ByVal @Cast("tensorflow::AsyncOpKernel::DoneCallback*") Fn done);

    // Takes ownership of the references in tensors. If necessary, a
    // device may override this method to keep a reference to the
    // accessed tensors until the async computation has completed.
    public native void ConsumeListOfAccessedTensors(
            DeviceContext context,
            @Cast("const tensorflow::TensorReferenceVector*") @ByRef AllocatorAttributesVector tensors);

    // If true, and tracing is enabled, the `tracing::ScopedAnnotation()` tracing
    // mechanism will be used instead of `tracing::ScopedActivity()`. Some devices
    // may override this method to use annotations, which enable child activities
    // (such as GPU kernel launches) to be related to the OpKernel invocation.
    public native @Cast("bool") boolean TraceUsingAnnotations();

    // Blocks until all operations queued on the device at the time of
    // the call have completed. Returns any error pending on the device
    // at completion.
    public native @ByVal Status Sync();

    // Override this to return true for devices that require a Sync() call before
    // session completion.
    public native @Cast("bool") boolean RequiresSyncOnCompletion();

    // Optionally modify the device's GraphDef before execution.
    //
    // This method should be considered experimental and is supplied to enable
    // prototyping of TensorFlow device implementations that need to modify
    // the GraphDef before execution.
    //
    // 'graph' supplies the partition of the graph assigned to this
    // device.
    public native @ByVal Status MaybeRewriteGraph(@UniquePtr Graph arg0);

    // Fill in the context map for the graph. Default behavior is to do
    // nothing.
    //
    // The caller takes ownership over the DeviceContext objects given
    // by the device.
    public native @ByVal Status FillContextMap(@Const Graph graph,
                        @Cast("tensorflow::DeviceContextMap*") DeviceContextVector device_context_map);

    // Returns the op segment of this device. The caller can reuse op
    // kernels registered for the same session running on this device.
    public native OpSegment op_segment();

    // Returns the resource manager associated with this device.
    public native ResourceMgr resource_manager();

    // Returns the device manager that owns this device, or nullptr if this Device
    // is not owned by a device manager.
    public native DeviceMgr device_mgr();

    // Summarizes the status of this Device, for debugging.
    public native @StdString BytePointer DebugString();

    // Assembles the parameter components into a complete DeviceAttributes value.
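    // --- Editorial example: building a DeviceAttributes proto with the static
    // helper declared just below. A sketch only: it assumes the DeviceType(String)
    // constructor and the DeviceLocality protobuf class defined elsewhere in this
    // file, and the concrete values are placeholders.
    public static DeviceAttributes buildCpuAttributesExample() {
        return BuildDeviceAttributes(
                "/job:localhost/replica:0/task:0/device:CPU:0",  // full device name
                new DeviceType("CPU"),
                256L << 20,                                      // memory_limit, in bytes
                new DeviceLocality());                           // 4-arg overload, no physical_device_desc
    }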
    public static native @ByVal DeviceAttributes BuildDeviceAttributes(
            @StdString BytePointer name, @ByVal DeviceType device,
            @Cast("tensorflow::Bytes") long memory_limit, @Const @ByRef DeviceLocality locality,
            @StdString BytePointer physical_device_desc);
    public static native @ByVal DeviceAttributes BuildDeviceAttributes(
            @StdString String name, @ByVal DeviceType device,
            @Cast("tensorflow::Bytes") long memory_limit, @Const @ByRef DeviceLocality locality,
            @StdString String physical_device_desc);

    public static native @ByVal DeviceAttributes BuildDeviceAttributes(
            @StdString BytePointer name, @ByVal DeviceType device,
            @Cast("tensorflow::Bytes") long memory_limit, @Const @ByRef DeviceLocality locality);
    public static native @ByVal DeviceAttributes BuildDeviceAttributes(
            @StdString String name, @ByVal DeviceType device,
            @Cast("tensorflow::Bytes") long memory_limit, @Const @ByRef DeviceLocality locality);

    // Clears the resource manager associated with this device.
    public native void ClearResourceMgr();
}

// namespace tensorflow

// #endif // TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_H_


// Parsed from tensorflow/core/common_runtime/device_mgr.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_MGR_H_
// #define TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_MGR_H_

// #include
// #include
// #include
// #include

// #include "tensorflow/core/common_runtime/device.h"
// #include "tensorflow/core/lib/core/arena.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/platform/macros.h"

@Namespace("tensorflow") @NoOffset public static class DeviceMgr extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DeviceMgr(Pointer p) { super(p); }

    // Takes ownership of each device in 'devices'.
    // TODO(zhifengc): Other initialization information.
    // TODO(b/37437134): Use std::unique_ptr's to track ownership.
    public DeviceMgr(@Const @ByRef DeviceVector devices) { super((Pointer)null); allocate(devices); }
    private native void allocate(@Const @ByRef DeviceVector devices);

    // Returns attributes of all devices.
    public native void ListDeviceAttributes(@StdVector DeviceAttributes devices);

    public native @ByVal DeviceVector ListDevices();

    // Returns a string listing all devices.
    public native @StdString BytePointer DebugString();

    // Returns a string describing all the device mappings.
    public native @StdString BytePointer DeviceMappingString();

    // Assigns *device with pointer to Device of the given name.
    // Accepts either a full device name, or just the replica-local suffix.
    public native @ByVal Status LookupDevice(@StringPiece BytePointer name,
                        @Cast("tensorflow::Device**") PointerPointer device);
    public native @ByVal Status LookupDevice(@StringPiece BytePointer name, @ByPtrPtr Device device);
    public native @ByVal Status LookupDevice(@StringPiece String name, @ByPtrPtr Device device);

    // Clears given containers of all devices if 'container' is
    // non-empty. Otherwise, clears default containers of all devices.
    public native void ClearContainers(@ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector containers);

    public native int NumDeviceType(@StdString BytePointer type);
    public native int NumDeviceType(@StdString String type);
}

// namespace tensorflow

// #endif // TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_MGR_H_


// Parsed from tensorflow/core/common_runtime/process_function_library_runtime.h

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PROCESS_FUNCTION_LIBRARY_RUNTIME_H_
// #define TENSORFLOW_CORE_COMMON_RUNTIME_PROCESS_FUNCTION_LIBRARY_RUNTIME_H_

// #include

// #include "tensorflow/core/common_runtime/device_mgr.h"
// #include "tensorflow/core/framework/function.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/protobuf/config.pb.h"

// A class that stores all the FunctionLibraryRuntime objects, one per device.
@Namespace("tensorflow") @NoOffset public static class ProcessFunctionLibraryRuntime extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ProcessFunctionLibraryRuntime(Pointer p) { super(p); }

    // Creates FunctionLibraryRuntime objects for each device in the provided
    // DeviceMgr. Caller needs to make sure that device_mgr, lib_def and parent
    // (if provided) outlive this object.
    public ProcessFunctionLibraryRuntime(
            @Const DeviceMgr device_mgr, Env env, int graph_def_version,
            @Const FunctionLibraryDefinition lib_def,
            @Const @ByRef OptimizerOptions optimizer_options,
            ThreadPool thread_pool/*=nullptr*/,
            DistributedFunctionLibraryRuntime parent/*=nullptr*/) {
        super((Pointer)null);
        allocate(device_mgr, env, graph_def_version, lib_def, optimizer_options, thread_pool, parent);
    }
    private native void allocate(
            @Const DeviceMgr device_mgr, Env env, int graph_def_version,
            @Const FunctionLibraryDefinition lib_def,
            @Const @ByRef OptimizerOptions optimizer_options,
            ThreadPool thread_pool/*=nullptr*/,
            DistributedFunctionLibraryRuntime parent/*=nullptr*/);
    public ProcessFunctionLibraryRuntime(
            @Const DeviceMgr device_mgr, Env env, int graph_def_version,
            @Const FunctionLibraryDefinition lib_def,
            @Const @ByRef OptimizerOptions optimizer_options) {
        super((Pointer)null);
        allocate(device_mgr, env, graph_def_version, lib_def, optimizer_options);
    }
    private native void allocate(
            @Const DeviceMgr device_mgr, Env env, int graph_def_version,
            @Const FunctionLibraryDefinition lib_def,
            @Const @ByRef OptimizerOptions optimizer_options);

    // With `custom_kernel_creator`.
    public ProcessFunctionLibraryRuntime(@Const DeviceMgr device_mgr, Env env,
            int graph_def_version, @Const FunctionLibraryDefinition lib_def,
            @Const @ByRef OptimizerOptions optimizer_options,
            @ByVal @Cast("tensorflow::CustomKernelCreator*") Pointer custom_kernel_creator,
            ThreadPool thread_pool, DistributedFunctionLibraryRuntime parent) {
        super((Pointer)null);
        allocate(device_mgr, env, graph_def_version, lib_def, optimizer_options,
                 custom_kernel_creator, thread_pool, parent);
    }
    private native void allocate(@Const DeviceMgr device_mgr, Env env,
            int graph_def_version, @Const FunctionLibraryDefinition lib_def,
            @Const @ByRef OptimizerOptions optimizer_options,
            @ByVal @Cast("tensorflow::CustomKernelCreator*") Pointer custom_kernel_creator,
            ThreadPool thread_pool, DistributedFunctionLibraryRuntime parent);

    // Sends `tensors_to_send` from `source_device` to `target_device` using
    // `rendezvous`. `key_prefix` is used as a prefix for the keys sent to the
    // Rendezvous. `device_context` should be the DeviceContext of the device
    // doing the sending. `alloc_attrs` should either be empty or be the size of
    // `tensors_to_send` and indicates how the input tensors are allocated. Method
    // takes references on each of the `tensors_to_send`. Method doesn't block.
    public static native @ByVal Status SendTensors(@StdString BytePointer source_device,
            @StdString BytePointer target_device, @StdString BytePointer key_prefix,
            @Cast("tensorflow::int64") long src_incarnation, @ByVal TensorVector tensors_to_send,
            DeviceContext device_context, @StdVector AllocatorAttributes alloc_attrs,
            Rendezvous rendezvous);
    public static native @ByVal Status SendTensors(@StdString String source_device,
            @StdString String target_device, @StdString String key_prefix,
            @Cast("tensorflow::int64") long src_incarnation, @ByVal TensorVector tensors_to_send,
            DeviceContext device_context, @StdVector AllocatorAttributes alloc_attrs,
            Rendezvous rendezvous);

    // Receives `received_tensors` from `target_device` (originally sent from
    // `source_device`) using `rendezvous`. Uses `key_prefix` to construct the
    // keys to be retrieved. `device_context` should be for the device receiving
    // the tensors. `alloc_attrs` indicates how to allocate the received
    // tensors and should either be empty or `num_tensors` in size. Method doesn't
    // block and calls `done` when `num_tensors` are fetched.
    public static native void ReceiveTensorsAsync(
            @StdString BytePointer source_device, @StdString BytePointer target_device,
            @StdString BytePointer key_prefix, @Cast("tensorflow::int64") long src_incarnation,
            @Cast("tensorflow::int64") long num_tensors, DeviceContext device_context,
            @StdVector AllocatorAttributes alloc_attrs, Rendezvous rendezvous,
            TensorVector received_tensors, @ByVal @Cast("tensorflow::StatusCallback*") Pointer done);
    public static native void ReceiveTensorsAsync(
            @StdString String source_device, @StdString String target_device,
            @StdString String key_prefix, @Cast("tensorflow::int64") long src_incarnation,
            @Cast("tensorflow::int64") long num_tensors, DeviceContext device_context,
            @StdVector AllocatorAttributes alloc_attrs, Rendezvous rendezvous,
            TensorVector received_tensors, @ByVal @Cast("tensorflow::StatusCallback*") Pointer done);

    @MemberGetter public static native byte kDefaultFLRDevice(int i);
    @MemberGetter public static native @Cast("const char*") BytePointer kDefaultFLRDevice();

    // Returns the FunctionLibraryRuntime for the corresponding device_name.
    public native FunctionLibraryRuntime GetFLR(@StdString BytePointer device_name);
    public native FunctionLibraryRuntime GetFLR(@StdString String device_name);

    // Returns the device incarnation for the given device_name.
    public native @ByVal Status GetDeviceIncarnation(@StdString BytePointer device_name,
                        @Cast("tensorflow::int64*") LongPointer incarnation);
    public native @ByVal Status GetDeviceIncarnation(@StdString String device_name,
                        @Cast("tensorflow::int64*") LongBuffer incarnation);
    public native @ByVal Status GetDeviceIncarnation(@StdString BytePointer device_name,
                        @Cast("tensorflow::int64*") long... incarnation);
    public native @ByVal Status GetDeviceIncarnation(@StdString String device_name,
                        @Cast("tensorflow::int64*") LongPointer incarnation);
    public native @ByVal Status GetDeviceIncarnation(@StdString BytePointer device_name,
                        @Cast("tensorflow::int64*") LongBuffer incarnation);
    public native @ByVal Status GetDeviceIncarnation(@StdString String device_name,
                        @Cast("tensorflow::int64*") long... incarnation);

    // For a given canonicalized key signature of the function instantiated
    // on device `device_name` and a `local_handle`, creates a handle and returns
    // that value. Uses core/common_runtime/framework/function.h::Canonicalize
    // to canonicalize the function signature.
    public native @Cast("tensorflow::FunctionLibraryRuntime::Handle") long AddHandle(
            @StdString BytePointer function_key, @StdString BytePointer device_name,
            @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle") long local_handle);
    public native @Cast("tensorflow::FunctionLibraryRuntime::Handle") long AddHandle(
            @StdString String function_key, @StdString String device_name,
            @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle") long local_handle);

    // Returns a handle if found for the given key, else returns kInvalidHandle.
    public native @Cast("tensorflow::FunctionLibraryRuntime::Handle") long GetHandle(@StdString BytePointer function_key);
    public native @Cast("tensorflow::FunctionLibraryRuntime::Handle") long GetHandle(@StdString String function_key);

    // For the given handle instantiated on device `device_name` returns the local
    // index of instantiation of that function. If the function was not
    // instantiated on `device_name` returns kInvalidLocalHandle.
public native @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle") long GetHandleOnDevice( @StdString BytePointer device_name, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); public native @Cast("tensorflow::FunctionLibraryRuntime::LocalHandle") long GetHandleOnDevice( @StdString String device_name, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); // Returns true if function with handle `handle` was instantiated on device // `device_name`. public native @Cast("bool") boolean IsInstantiatedOnDevice(@StdString BytePointer device_name, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); public native @Cast("bool") boolean IsInstantiatedOnDevice(@StdString String device_name, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); // Instantiates the function. See framework/function.h for more details. // Allows for function_name to be instantiated on different devices // as specified in attrs. public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongPointer handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongBuffer handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") long... handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongPointer handle); public native @ByVal Status Instantiate(@StdString BytePointer function_name, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") LongBuffer handle); public native @ByVal Status Instantiate(@StdString String function_name, @ByVal AttrSlice attrs, @Const @ByRef FunctionLibraryRuntime.InstantiateOptions options, @Cast("tensorflow::FunctionLibraryRuntime::Handle*") long... handle); // Delegates to the local FLR that owns state corresponding to `handle` and // tells it to release it. If the `handle` isnt' needed at all, the local FLR // might call RemoveHandle on this to get rid of the state owned by the Proc // FLR. public native @ByVal Status ReleaseHandle(@Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle); // Runs the function with given `handle`. Function could have been // instantiated on any device. More details in framework/function.h public native void Run(@Const @ByRef FunctionLibraryRuntime.Options opts, @Cast("tensorflow::FunctionLibraryRuntime::Handle") long handle, @ByVal TensorVector args, TensorVector rets, @ByVal @Cast("tensorflow::FunctionLibraryRuntime::DoneCallback*") Pointer done); } // namespace tensorflow // #endif // TENSORFLOW_CORE_COMMON_RUNTIME_PROCESS_FUNCTION_LIBRARY_RUNTIME_H_ // Parsed from tensorflow/core/graph/graph.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// A Graph describes a set of computations that are to be
// performed, as well as the dependencies between those
// computations. The basic model is a DAG (directed acyclic graph) with
// * internal nodes representing computational operations to be performed;
// * edges representing dependencies, indicating the target may only be
//   executed once the source has completed; and
// * predefined "source" (start) and "sink" (finish) nodes -- the source
//   should be the only node that doesn't depend on anything, and the sink
//   should be the only node that nothing depends on.
//
// Note: Node ids are intended to be relatively dense in the
// 0..max_id range, but there may be gaps since ids won't be reused.
//
// Note: Some dependencies between operations are due to one operation
// consuming the output of another. In fact operations can produce
// multiple outputs and consume multiple inputs, and some
// optimizations will care about which specific outputs are connected
// to which specific inputs. We therefore represent data dependency
// between output O of layer A and input I of layer B using
// "input index" and "output index" labels per edge.

// #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_H_
// #define TENSORFLOW_CORE_GRAPH_GRAPH_H_

// #include
// #include
// #include

// #include "tensorflow/core/framework/function.h"
// #include "tensorflow/core/framework/op.h"
// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/graph/edgeset.h"
// #include "tensorflow/core/lib/core/arena.h"
// #include "tensorflow/core/lib/core/refcount.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/gtl/iterator_range.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"

@Namespace("tensorflow") @Opaque public static class EdgeSetTest extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public EdgeSetTest() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EdgeSetTest(Pointer p) { super(p); }
}
@Namespace("tensorflow") @Opaque public static class WhileContext extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public WhileContext() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public WhileContext(Pointer p) { super(p); }
}
// Declared below
// Declared below
@Namespace("tensorflow") @Opaque public static class NodeProperties extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public NodeProperties() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeProperties(Pointer p) { super(p); }
}
// Defined in .cc

@Namespace("tensorflow") @NoOffset public static class Node extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
     */
    public Node(Pointer p) { super(p); }

    public native @StdString BytePointer DebugString();
    public native int id();
    public native int cost_id();
    public native @StdString BytePointer name();
    public native @StdString BytePointer type_string();

    // def() provides the NodeDef the user supplied, but the specifics
    // of this Node may have changed due to placement, optimization, etc.
    // In particular:
    // * def().name() will match name();
    // * def().op() will match type_string() and op_def().name();
    // * def().input() is not reliable, use "in_edges()" below instead;
    // * def().device() is the "user's requested device" and may not match
    //   the actual assigned device, see assigned_device_name() below;
    // * def().attr() is authoritative.
    // TODO(irving): Replace with NodeInfo.
    public native @Const @ByRef NodeDef def();
    public native @Const @ByRef OpDef op_def();

    // input and output types
    public native int num_inputs();
    public native @Cast("tensorflow::DataType") int input_type(int i);
    public native @Const @ByRef DataTypeVector input_types();

    public native int num_outputs();
    public native @Cast("tensorflow::DataType") int output_type(int o);
    public native @Const @ByRef DataTypeVector output_types();

    // The device requested by the user. For the actual assigned device,
    // use assigned_device_name() below.
    public native @StdString BytePointer requested_device();

    // This changes the user requested device but not necessarily the device
    // on which the operation will run.
    public native void set_requested_device(@StdString BytePointer device);
    public native void set_requested_device(@StdString String device);

    // This gives the device the runtime has assigned this node to. If
    // you want the device the user requested, use def().device() instead.
    // TODO(josh11b): Validate that the assigned_device, if not empty:
    // fully specifies a device, and satisfies def().device().
    // TODO(josh11b): Move assigned_device_name outside of Node into a
    // NodeId->DeviceName map.
    public native @StdString BytePointer assigned_device_name();
    public native void set_assigned_device_name(@StdString BytePointer device_name);
    public native void set_assigned_device_name(@StdString String device_name);
    public native @Cast("bool") boolean has_assigned_device_name();
    public native int assigned_device_name_index();
    public native void set_assigned_device_name_index(int index);

    // Read only access to attributes
    public native @ByVal AttrSlice attrs();

    // Inputs requested by the NodeDef. For the actual inputs, use in_edges.

    // Get the neighboring nodes via edges either in or out of this node. This
    // includes control edges.
    public native @ByVal NeighborIterRange in_nodes();
    public native @ByVal NeighborIterRange out_nodes();
    public native @Const @ByRef EdgeSet in_edges();
    public native @Const @ByRef EdgeSet out_edges();

    // Node type helpers.
    public native @Cast("bool") boolean IsSource();
    public native @Cast("bool") boolean IsSink();
    // Anything other than the special Source & Sink nodes.
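    // --- Editorial example: reading a node's input signature through the
    // typed accessors above. The returned ints are tensorflow::DataType
    // values, i.e. the DT_* constants defined elsewhere in this file.
    public static int[] inputTypesExample(Node node) {
        int[] types = new int[node.num_inputs()];
        for (int i = 0; i < types.length; i++) {
            types[i] = node.input_type(i);
        }
        return types;
    }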
public native @Cast("bool") boolean IsOp(); // Node class helpers public native @Cast("bool") boolean IsSwitch(); public native @Cast("bool") boolean IsMerge(); public native @Cast("bool") boolean IsEnter(); public native @Cast("bool") boolean IsExit(); public native @Cast("bool") boolean IsNextIteration(); public native @Cast("bool") boolean IsLoopCond(); public native @Cast("bool") boolean IsControlTrigger(); public native @Cast("bool") boolean IsSend(); public native @Cast("bool") boolean IsRecv(); public native @Cast("bool") boolean IsConstant(); public native @Cast("bool") boolean IsVariable(); public native @Cast("bool") boolean IsIdentity(); public native @Cast("bool") boolean IsGetSessionHandle(); public native @Cast("bool") boolean IsGetSessionTensor(); public native @Cast("bool") boolean IsDeleteSessionTensor(); public native @Cast("bool") boolean IsControlFlow(); public native @Cast("bool") boolean IsHostSend(); public native @Cast("bool") boolean IsHostRecv(); public native @Cast("bool") boolean IsScopedAllocator(); public native @Cast("bool") boolean IsCollective(); public native @Cast("bool") boolean IsMetadata(); public native void ClearAttr(@StdString BytePointer name); public native void ClearAttr(@StdString String name); // Returns into '*e' the edge connecting to the 'idx' input of this Node. public native @ByVal Status input_edge(int idx, @Cast("const tensorflow::Edge**") PointerPointer e); public native @ByVal Status input_edge(int idx, @Const @ByPtrPtr Edge e); // Returns into '*edges' the input data edges of this Node, indexed by input // number. Does not return control edges. public native @ByVal Status input_edges(@Cast("std::vector*") EdgeVector edges); // Returns into '*n' the node that has an output connected to the // 'idx' input of this Node. public native @ByVal Status input_node(int idx, @Cast("const tensorflow::Node**") PointerPointer n); public native @ByVal Status input_node(int idx, @Const @ByPtrPtr Node n); public native WhileContext while_ctx(); public native void set_while_ctx(WhileContext while_ctx); } // Represents an input of a node, i.e., the `index`-th input to `node`. @Namespace("tensorflow") @NoOffset public static class InputTensor extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InputTensor(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public InputTensor(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public InputTensor position(long position) { return (InputTensor)super.position(position); } @MemberGetter public native @Const Node node(); public native int index(); public native InputTensor index(int index); public InputTensor(@Const Node n, int i) { super((Pointer)null); allocate(n, i); } private native void allocate(@Const Node n, int i); public InputTensor() { super((Pointer)null); allocate(); } private native void allocate(); // Returns true if this InputTensor is identical to 'other'. Nodes are // compared using pointer equality. public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef InputTensor other); // A hash function for InputTensors. Nodes are hashed based on their pointer // value. public static class Hash extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Hash() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
         */
        public Hash(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Hash(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Hash position(long position) { return (Hash)super.position(position); }

        public native @Cast("tensorflow::uint64") @Name("operator ()") long apply(@Const @ByRef InputTensor s);
    }
}

// Represents an output of a node, i.e., the `index`-th output of `node`. Note
// that a single `OutputTensor` can correspond to multiple `Edge`s if the output
// is consumed by multiple destination nodes.
@Namespace("tensorflow") @NoOffset public static class OutputTensor extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OutputTensor(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OutputTensor(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public OutputTensor position(long position) { return (OutputTensor)super.position(position); }

    @MemberGetter public native @Const Node node();
    public native int index(); public native OutputTensor index(int index);

    public OutputTensor(@Const Node n, int i) { super((Pointer)null); allocate(n, i); }
    private native void allocate(@Const Node n, int i);
    public OutputTensor() { super((Pointer)null); allocate(); }
    private native void allocate();

    // Returns true if this OutputTensor is identical to 'other'. Nodes are
    // compared using pointer equality.
    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef OutputTensor other);

    // A hash function for OutputTensors. Nodes are hashed based on their pointer
    // value.
    public static class Hash extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Hash() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Hash(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Hash(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Hash position(long position) { return (Hash)super.position(position); }

        public native @Cast("tensorflow::uint64") @Name("operator ()") long apply(@Const @ByRef OutputTensor s);
    }
}

@Namespace("tensorflow") @NoOffset public static class Edge extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Edge(Pointer p) { super(p); }

    public native Node src();
    public native Node dst();
    public native int id();

    // Return the index of the source output that produces the data
    // carried by this edge. The special value kControlSlot is used
    // for control dependencies.
    public native int src_output();

    // Return the index of the destination input that consumes the data
    // carried by this edge. The special value kControlSlot is used
    // for control dependencies.
    public native int dst_input();

    // Return true iff this is an edge that indicates a control-flow
    // (as opposed to a data-flow) dependency.
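    // --- Editorial example: counting control edges with the IsControlEdge()
    // accessor declared below, iterated through the GraphEdgesIterable type
    // defined later in this file (Graph.edges() returns one).
    public static int countControlEdgesExample(Graph graph) {
        int count = 0;
        GraphEdgesIterable edges = graph.edges();
        for (GraphEdgesIterable.const_iterator it = edges.begin();
                it.notEquals(edges.end()); it.increment()) {
            if (it.multiply().IsControlEdge()) count++;
        }
        return count;
    }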
public native @Cast("bool") boolean IsControlEdge(); public native @StdString BytePointer DebugString(); } // Allows for iteration of the edges of a Graph, by iterating the underlying // Graph.edges_ vector while skipping over null entries. @Namespace("tensorflow") @NoOffset public static class GraphEdgesIterable extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GraphEdgesIterable(Pointer p) { super(p); } public GraphEdgesIterable(@Cast("const std::vector*") @ByRef EdgeVector edges) { super((Pointer)null); allocate(edges); } private native void allocate(@Cast("const std::vector*") @ByRef EdgeVector edges); @NoOffset public static class const_iterator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public const_iterator(Pointer p) { super(p); } public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef const_iterator other); public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef const_iterator other); // This is the prefix increment operator (++x), which is the operator // used by C++ range iteration (for (x : y) ...). We intentionally do not // provide a postfix increment operator. public native @ByRef @Name("operator ++") const_iterator increment(); public native @Cast("tensorflow::GraphEdgesIterable::value_type") @Name("operator *") Edge multiply(); } public native @ByVal const_iterator begin(); public native @ByVal const_iterator end(); } // Thread compatible but not thread safe. @Namespace("tensorflow") @NoOffset public static class Graph extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Graph(Pointer p) { super(p); } // Constructs a graph with a single SOURCE (always id kSourceId) and a // single SINK (always id kSinkId) node, and an edge from SOURCE->SINK. // // The graph can hold ops found in registry. `registry`s lifetime must be at // least that of the constructed graph's. public Graph(@Const OpRegistryInterface registry) { super((Pointer)null); allocate(registry); } private native void allocate(@Const OpRegistryInterface registry); // Constructs a graph with a single SOURCE (always id kSourceId) and a // single SINK (always id kSinkId) node, and an edge from SOURCE->SINK. // // The graph can hold ops found in `flib_def`. Unlike the constructor taking // an OpRegistryInterface, this constructor copies the function definitions in // `flib_def` so its lifetime may be shorter than that of the graph's. The // OpRegistryInterface backing `flib_def` must still have the lifetime of the // graph though. public Graph(@Const @ByRef FunctionLibraryDefinition flib_def) { super((Pointer)null); allocate(flib_def); } private native void allocate(@Const @ByRef FunctionLibraryDefinition flib_def); @MemberGetter public static native int kControlSlot(); public static final int kControlSlot = kControlSlot(); // The GraphDef version range of this graph (see graph.proto). public native @Const @ByRef VersionDef versions(); public native void set_versions(@Const @ByRef VersionDef versions); // Adds a new node to this graph, and returns it. Infers the Op and // input/output types for the node. *this owns the returned instance. // Returns nullptr and sets *status on error. public native Node AddNode(@Const @ByRef NodeDef node_def, Status status); // Copies *node, which may belong to another graph, to a new node, // which is returned. 
    // Does not copy any edges. *this owns the returned instance.
    public native Node CopyNode(@Const Node node);

    // Removes a node from this graph, including all edges from or to it.
    // *node should not be accessed after calling this function.
    // REQUIRES: node->IsOp()
    public native void RemoveNode(Node node);

    // Adds an edge that connects the xth output of `source` to the yth input of
    // `dest` and returns it. Does not update dest's NodeDef.
    public native @Const Edge AddEdge(Node source, int x, Node dest, int y);

    // Adds a control edge (no data flows along this edge) that connects `source`
    // to `dest`. If `dest`'s NodeDef is missing the corresponding control input,
    // adds the control input.
    //
    // If such a control edge already exists and `allow_duplicates` is false, no
    // edge is added and the function returns nullptr. Otherwise the edge is
    // unconditionally created and returned. The NodeDef is not updated if
    // `allow_duplicates` is true.
    // TODO(skyewm): allow_duplicates is needed only by graph_partition.cc.
    // Figure out if we can do away with it.
    public native @Const Edge AddControlEdge(Node source, Node dest, @Cast("bool") boolean allow_duplicates/*=false*/);
    public native @Const Edge AddControlEdge(Node source, Node dest);

    // Removes edge from the graph. Does not update the destination node's
    // NodeDef.
    // REQUIRES: The edge must exist.
    public native void RemoveEdge(@Const Edge edge);

    // Removes control edge `edge` from the graph. Note that this also updates
    // the corresponding NodeDef to reflect the change.
    // REQUIRES: The control edge must exist.
    public native void RemoveControlEdge(@Const Edge e);

    // Updates the input to a node. The existing edge to `dst` is removed and an
    // edge from `new_src` to `dst` is created. The NodeDef associated with `dst`
    // is also updated.
    public native @ByVal Status UpdateEdge(Node new_src, int new_src_index, Node dst, int dst_index);

    // Adds the function and gradient definitions in `fdef_lib` to this graph's op
    // registry. Ignores duplicate functions, and returns a bad status if an
    // imported function differs from an existing function or op with the same
    // name.
    public native @ByVal Status AddFunctionLibrary(@Const @ByRef FunctionDefLibrary fdef_lib);

    // The number of live nodes in the graph.
    //
    // Because nodes can be removed from the graph, num_nodes() is often
    // smaller than num_node_ids(). If one needs to create an array of
    // nodes indexed by node ids, num_node_ids() should be used as the
    // array's size.
    public native int num_nodes();

    // The number of live nodes in the graph, excluding the Source and Sink nodes.
    public native int num_op_nodes();

    // The number of live edges in the graph.
    //
    // Because edges can be removed from the graph, num_edges() is often
    // smaller than num_edge_ids(). If one needs to create an array of
    // edges indexed by edge ids, num_edge_ids() should be used as the
    // array's size.
    public native int num_edges();

    // Serialize the nodes starting at `from_node_id` to a GraphDef.
    public native void ToGraphDefSubRange(GraphDef graph_def, int from_node_id);

    // Serialize to a GraphDef.
    public native void ToGraphDef(GraphDef graph_def);

    // This version can be called from debugger to inspect the graph content.
    // Use the previous version outside debug context for efficiency reasons.
    //
    // Note: We do not expose a DebugString() API, since GraphDef.DebugString() is
    // not defined in some TensorFlow builds.
    public native @ByVal GraphDef ToGraphDefDebug();

    // Generate new node name with the specified prefix that is unique
    // across this graph.
    public native @StdString BytePointer NewName(@StringPiece BytePointer prefix);
    public native @StdString String NewName(@StringPiece String prefix);

    // Access to the list of all nodes. Example usage:
    //   for (Node* node : graph.nodes()) { ... }
    public native @ByVal NodeIterRange nodes();

    // Access to the list of all nodes, excluding the Source and Sink nodes.
    public native @ByVal NodeIterRange op_nodes();

    // Returns one more than the maximum id assigned to any node.
    public native int num_node_ids();

    // Returns the node associated with an id, or nullptr if no node
    // with that id (the node with that id was removed and the id has
    // not yet been re-used). *this owns the returned instance.
    // REQUIRES: 0 <= id < num_node_ids().
    public native Node FindNodeId(int id);

    // Returns one more than the maximum id assigned to any edge.
    public native int num_edge_ids();

    // Returns the Edge associated with an id, or nullptr if no edge
    // with that id (the edge with that id was removed and the id has
    // not yet been re-used). *this owns the returned instance.
    // REQUIRES: 0 <= id < num_edge_ids().
    public native @Const Edge FindEdgeId(int id);

    // Access to the set of all edges. Example usage:
    //   for (const Edge* e : graph.edges()) { ... }
    public native @ByVal GraphEdgesIterable edges();

    // The pre-defined nodes.
    /** enum tensorflow::Graph:: */
    public static final int kSourceId = 0,
                            kSinkId = 1;
    public native Node source_node();
    public native Node sink_node();

    public native @Const OpRegistryInterface op_registry();
    public native @Const @ByRef FunctionLibraryDefinition flib_def();

    public native void CheckDeviceNameIndex(int index);

    public native int InternDeviceName(@StdString BytePointer device_name);
    public native int InternDeviceName(@StdString String device_name);

    public native @StdString BytePointer get_assigned_device_name(@Const @ByRef Node node);

    public native void set_assigned_device_name_index(Node node, int device_name_index);

    public native void set_assigned_device_name(Node node, @StdString BytePointer device_name);
    public native void set_assigned_device_name(Node node, @StdString String device_name);

    // Returns OK if `node` is non-null and belongs to this graph
    public native @ByVal Status IsValidNode(@Const Node node);

    // Returns OK if IsValidNode(`node`) and `idx` is less than
    // node->num_outputs()
    public native @ByVal Status IsValidOutputTensor(@Const Node node, int idx);

    // Returns OK if IsValidNode(`node`) and `idx` is less than
    // node->num_inputs()
    public native @ByVal Status IsValidInputTensor(@Const Node node, int idx);

    // Create and return a new WhileContext owned by this graph. This is called
    // when a new while loop is created. `frame_name` must be unique among
    // WhileContexts in this graph.
    public native @ByVal Status AddWhileContext(@StringPiece BytePointer frame_name,
            @ByVal NodeVector enter_nodes, @ByVal NodeVector exit_nodes,
            @ByVal OutputTensor cond_output, @StdVector OutputTensor body_inputs,
            @StdVector OutputTensor body_outputs,
            @Cast("tensorflow::WhileContext**") PointerPointer result);
    public native @ByVal Status AddWhileContext(@StringPiece BytePointer frame_name,
            @ByVal NodeVector enter_nodes, @ByVal NodeVector exit_nodes,
            @ByVal OutputTensor cond_output, @StdVector OutputTensor body_inputs,
            @StdVector OutputTensor body_outputs, @ByPtrPtr WhileContext result);
    public native @ByVal Status AddWhileContext(@StringPiece String frame_name,
            @ByVal NodeVector enter_nodes, @ByVal NodeVector exit_nodes,
            @ByVal OutputTensor cond_output, @StdVector OutputTensor body_inputs,
            @StdVector OutputTensor body_outputs, @ByPtrPtr WhileContext result);
}

// TODO(josh11b): We may want to support keeping an index on various
// node/edge attributes in a graph, particularly node names.

// Helper routines

@Namespace("tensorflow") public static native @Cast("bool") boolean IsSource(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsSink(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsSwitch(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsMerge(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsEnter(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsExit(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsNextIteration(@Const Node n);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsLoopCond(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsControlTrigger(@Const Node n);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsSend(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsRecv(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsHostSend(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsHostRecv(@Const Node node);

// True for Nodes that mediate the transfer of values between processes.
@Namespace("tensorflow") public static native @Cast("bool") boolean IsTransferNode(@Const Node n);

@Namespace("tensorflow") public static native @Cast("bool") boolean IsConstant(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsVariable(@Const Node node);
@Namespace("tensorflow") public static native @Cast("bool") boolean IsIdentity(@Const Node node);

// Returns true iff 'n' is a control flow node.
@Namespace("tensorflow") public static native @Cast("bool") boolean IsControlFlow(@Const Node n);

// Returns true if the node only depends on its input's metadata
// (shape). Specifically, returns true for "Size", "Shape" and "Rank" ops.
@Namespace("tensorflow") public static native @Cast("bool") boolean IsMetadata(@Const Node n);

@Namespace("tensorflow") public static native @Cast("bool") boolean IsScopedAllocator(@Const Node n);

@Namespace("tensorflow") public static native @Cast("bool") boolean IsHostMemoryPreserving(@Const Node node);

// Iterator for stepping through the nodes of a graph.
@Namespace("tensorflow") @NoOffset public static class NodeIter extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor.
        Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeIter(Pointer p) { super(p); }

    public NodeIter(@Const Graph graph, int id) { super((Pointer)null); allocate(graph, id); }
    private native void allocate(@Const Graph graph, int id);
    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NodeIter rhs);
    public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef NodeIter rhs);
    public native @Name("operator ++") void increment();
    public native @Name("operator *") Node multiply();
    public native @Name("operator ->") Node access();
}

// Iterator for stepping through the neighbors of a node.
@Namespace("tensorflow") @NoOffset public static class NeighborIter extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NeighborIter(Pointer p) { super(p); }

    public NeighborIter(@ByVal EdgeSetIterator iter, @Cast("bool") boolean incoming) { super((Pointer)null); allocate(iter, incoming); }
    private native void allocate(@ByVal EdgeSetIterator iter, @Cast("bool") boolean incoming);
    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NeighborIter rhs);
    public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef NeighborIter rhs);
    public native @Name("operator ++") void increment();
    public native @Name("operator *") Node multiply();
    public native @Name("operator ->") Node access();
}

// IMPLEMENTATION DETAILS, PLEASE IGNORE

// namespace tensorflow

// #endif // TENSORFLOW_CORE_GRAPH_GRAPH_H_


// Parsed from tensorflow/core/graph/tensor_id.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_GRAPH_TENSOR_ID_H_
// #define TENSORFLOW_GRAPH_TENSOR_ID_H_

// #include

// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/hash/hash.h"
// #include "tensorflow/core/lib/strings/strcat.h"

// Identifier for a tensor within a step.
// first == operation_name, second == output_index
// Note: does not own backing storage for name.
@Namespace("tensorflow") public static class TensorId extends StringPieceIntPair {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TensorId(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TensorId(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public TensorId position(long position) { return (TensorId)super.position(position); }

    // Inherit the set of constructors.
    // NOTE(skyewm): this is required on some platforms. I'm not sure why the
    // using statement above isn't always sufficient.
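    // --- Editorial example: ParseTensorName(), declared right after this
    // class, splits an "op_name:output_index" string into a TensorId and
    // round-trips through ToString(); getString() converts the BytePointer.
    public static String parseRoundTripExample() {
        TensorId id = ParseTensorName("dense/kernel:1");
        return id.ToString().getString();  // expected: "dense/kernel:1"
    }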
    public TensorId() { super((Pointer)null); allocate(); }
    private native void allocate();
    public TensorId(@Const @ByRef SafeTensorId id) { super((Pointer)null); allocate(id); }
    private native void allocate(@Const @ByRef SafeTensorId id);

    public native @StdString BytePointer ToString();

    public static class Hasher extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Hasher() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Hasher(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Hasher(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Hasher position(long position) { return (Hasher)super.position(position); }

        public native @Cast("std::size_t") @Name("operator ()") long apply(@Const @ByRef TensorId x);
    }
}

@Namespace("tensorflow") public static native @ByVal TensorId ParseTensorName(@StdString BytePointer name);
@Namespace("tensorflow") public static native @ByVal TensorId ParseTensorName(@StdString String name);

// Same as TensorId, except owns the backing storage for the op name. This makes
// the memory management simpler at the expense of a copy.
@Namespace("tensorflow") public static class SafeTensorId extends StringIntPair {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SafeTensorId(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public SafeTensorId(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public SafeTensorId position(long position) { return (SafeTensorId)super.position(position); }

    // NOTE(skyewm): this is required on some platforms. I'm not sure why
    // "using Base::pair;" isn't always sufficient.
    public SafeTensorId() { super((Pointer)null); allocate(); }
    private native void allocate();
    public SafeTensorId(@StdString BytePointer str, int idx) { super((Pointer)null); allocate(str, idx); }
    private native void allocate(@StdString BytePointer str, int idx);
    public SafeTensorId(@StdString String str, int idx) { super((Pointer)null); allocate(str, idx); }
    private native void allocate(@StdString String str, int idx);
    public SafeTensorId(@Const @ByRef TensorId id) { super((Pointer)null); allocate(id); }
    private native void allocate(@Const @ByRef TensorId id);

    public native @StdString BytePointer ToString();

    public static class Hasher extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Hasher() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Hasher(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Hasher(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Hasher position(long position) { return (Hasher)super.position(position); }

        public native @Cast("std::size_t") @Name("operator ()") long apply(@Const @ByRef TensorId x);
    }
}

// namespace tensorflow

// #endif // TENSORFLOW_GRAPH_TENSOR_ID_H_


// Parsed from tensorflow/core/common_runtime/graph_runner.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_RUNNER_H_
// #define TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_RUNNER_H_

// #include
// #include
// #include

// #include "tensorflow/core/common_runtime/device.h"
// #include "tensorflow/core/framework/function.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/platform/env.h"

// GraphRunner takes a Graph, some inputs to feed, and some outputs
// to fetch and executes the graph required to feed and fetch the
// inputs and outputs.
//
// This class is only meant for internal use where one needs to
// partially evaluate inexpensive nodes in a graph, such as for shape
// inference or for constant folding. Because of its limited, simple
// use-cases, it executes all computation on the given device (CPU by default)
// and is not meant to be particularly lightweight, fast, or efficient.
@Namespace("tensorflow") @NoOffset public static class GraphRunner extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GraphRunner(Pointer p) { super(p); }

    // REQUIRES: `env` is not nullptr.
    public GraphRunner(Env env) { super((Pointer)null); allocate(env); }
    private native void allocate(Env env);
    // REQUIRES: 'device' is not nullptr. Not owned.
    public GraphRunner(Device device) { super((Pointer)null); allocate(device); }
    private native void allocate(Device device);

    // Function semantics for `inputs`, `output_names` and `outputs`
    // matches those from Session::Run().
    //
    // NOTE: The output tensors share lifetime with the GraphRunner, and could
    // be destroyed once the GraphRunner is destroyed.
    //
    // REQUIRES: `graph`, `env`, and `outputs` are not nullptr.
    // `function_library` may be nullptr.
    public native @ByVal Status Run(Graph graph, FunctionLibraryRuntime function_library,
            @Cast("const tensorflow::GraphRunner::NamedTensorList*") @ByRef StringTensorPairVector inputs,
            @Const @ByRef StringVector output_names, TensorVector outputs);
}

// namespace tensorflow

// #endif // TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_RUNNER_H_


// Parsed from tensorflow/core/common_runtime/shape_refiner.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_SHAPE_REFINER_H_
// #define TENSORFLOW_CORE_COMMON_RUNTIME_SHAPE_REFINER_H_

// #include

// #include "tensorflow/core/common_runtime/graph_runner.h"
// #include "tensorflow/core/framework/function.pb.h"
// #include "tensorflow/core/framework/shape_inference.h"
// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/platform/macros.h"

// This class stores extra inference information in addition to
// InferenceContext, such as inference tree for user-defined functions and node
// input and output types.

// ShapeRefiner performs shape inference for TensorFlow Graphs. It is
// responsible for instantiating InferenceContext objects for each
// Node in the Graph, and providing/storing the 'input_tensor' Tensors
// used by Shape Inference functions, when available at graph
// construction time.
@Namespace("tensorflow") @NoOffset public static class ShapeRefiner extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShapeRefiner(Pointer p) { super(p); }

    public ShapeRefiner(int graph_def_version, @Const OpRegistryInterface ops) { super((Pointer)null); allocate(graph_def_version, ops); }
    private native void allocate(int graph_def_version, @Const OpRegistryInterface ops);

    // Same as ShapeRefiner(versions.producer(), ops)
    public ShapeRefiner(@Const @ByRef VersionDef versions, @Const OpRegistryInterface ops) { super((Pointer)null); allocate(versions, ops); }
    private native void allocate(@Const @ByRef VersionDef versions, @Const OpRegistryInterface ops);

    // Performs validation of 'node' and runs 'node's shape function,
    // storing its shape outputs.
    //
    // All inputs of 'node' must be added to ShapeRefiner prior to
    // adding 'node'.
    //
    // Returns an error if:
    //  - the shape function for 'node' was not registered.
    //  - 'node' was added before its inputs.
    //  - The shape inference function returns an error.
    public native @ByVal Status AddNode(@Const Node node);

    // Sets 'node's 'output_port' output to have shape 'shape'.
    //
    // Returns an error if 'node' was not previously added to this
    // object, if 'output_port' is invalid, or if 'shape' is
    // not compatible with the existing shape of the output.
    public native @ByVal Status SetShape(@Const Node node, int output_port, @ByVal ShapeHandle shape);

    // Update the input shapes of node in case the shapes of the fan-ins of 'node'
    // have themselves been modified (For example, in case of incremental shape
    // refinement). If 'relax' is true, a new shape with the broadest set of
    // information will be set as the new input (see InferenceContext::RelaxInput
    // for full details and examples). Sets refined to true if any shapes have
    // changed (in their string representations). Note that shapes may have been
    // updated to newer versions (but with identical string representations) even
    // if <*refined> is set to false.
    public native @ByVal Status UpdateNode(@Const Node node, @Cast("bool") boolean relax, @Cast("bool*") BoolPointer refined);
    public native @ByVal Status UpdateNode(@Const Node node, @Cast("bool") boolean relax, @Cast("bool*") boolean... refined);

    // Returns the InferenceContext for 'node', if present.
    public native InferenceContext GetContext(@Const Node node);

    // Returns the ExtendedInferenceContext for 'node', if present.

    // Getters and setters for graph_def_version_.
public native int graph_def_version(); public native void set_graph_def_version(int version); public native void set_require_shape_inference_fns(@Cast("bool") boolean require_shape_inference_fns); public native void set_disable_constant_propagation(@Cast("bool") boolean disable); // Set function library to enable function shape inference. // Without function library, function inference always yields unknown shapes. // With this enabled, shape inference can take more time since it descends // into all function calls. It doesn't do inference once for each function // definition, but once for each function call. // The function library must outlive the shape refiner. public native void set_function_library_for_shape_inference( @Const FunctionLibraryDefinition lib); public native @Cast("bool") boolean function_shape_inference_supported(); // Call this to keep nested shapes information for user-defined functions: // nested inferences will be available on the ExtendedInferenceContext for // each function node, forming a tree of shape inferences corresponding to the // tree of nested function calls. By default this setting is disabled, and // only the shapes for the top-level function node will be reported on the // InferenceContext for each function node, to reduce memory usage. // // This flag has no effect when the function inference is not enabled via // set_function_library_for_shape_inference. public native void set_keep_nested_shape_inferences(); } // namespace tensorflow // #endif // TENSORFLOW_CORE_COMMON_RUNTIME_SHAPE_REFINER_H_ // Parsed from tensorflow/core/framework/node_def_builder.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_NODE_DEF_BUILDER_H_ // #define TENSORFLOW_CORE_FRAMEWORK_NODE_DEF_BUILDER_H_ // #include // #include // #include "tensorflow/core/framework/attr_value_util.h" // #include "tensorflow/core/framework/node_def.pb.h" // #include "tensorflow/core/framework/node_def_util.h" // #include "tensorflow/core/framework/op.h" // #include "tensorflow/core/framework/op_def.pb.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/gtl/array_slice.h" // #include "tensorflow/core/lib/strings/strcat.h" // This is a helper for creating a NodeDef. Automatically sets attrs // that can be inferred from the inputs, and uses default values // (where they exist) for unspecified attrs. Example usage: // // NodeDef node_def; // Status status = NodeDefBuilder(node_name, op_name) // .Input(...) // .Attr(...) // .Finalize(&node_def); // if (!status.ok()) return status; // // Use node_def here. @Namespace("tensorflow") @NoOffset public static class NodeDefBuilder extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public NodeDefBuilder(Pointer p) { super(p); } // To specify an output to be consumed by one of the Input() methods below. @NoOffset public static class NodeOut extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NodeOut(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public NodeOut(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public NodeOut position(long position) { return (NodeOut)super.position(position); } public NodeOut(@StringPiece BytePointer n, int i, @Cast("tensorflow::DataType") int dt) { super((Pointer)null); allocate(n, i, dt); } private native void allocate(@StringPiece BytePointer n, int i, @Cast("tensorflow::DataType") int dt); public NodeOut(@StringPiece String n, int i, @Cast("tensorflow::DataType") int dt) { super((Pointer)null); allocate(n, i, dt); } private native void allocate(@StringPiece String n, int i, @Cast("tensorflow::DataType") int dt); public NodeOut() { super((Pointer)null); allocate(); } private native void allocate(); // uninitialized, call Reset() before use. public native void Reset(@StringPiece BytePointer n, int i, @Cast("tensorflow::DataType") int dt); public native void Reset(@StringPiece String n, int i, @Cast("tensorflow::DataType") int dt); public native @StdString BytePointer node(); public native NodeOut node(BytePointer node); public native int index(); public native NodeOut index(int index); public native @Cast("tensorflow::DataType") int data_type(); public native NodeOut data_type(int data_type); } // Specify the name and the Op (either via an OpDef or the name of // the Op plus a registry) for the NodeDef. Other fields are // specified by calling the methods below. // REQUIRES: The OpDef must satisfy ValidateOpDef(). public NodeDefBuilder(@StringPiece BytePointer name, @StringPiece BytePointer op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/) { super((Pointer)null); allocate(name, op_name, op_registry); } private native void allocate(@StringPiece BytePointer name, @StringPiece BytePointer op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/); public NodeDefBuilder(@StringPiece BytePointer name, @StringPiece BytePointer op_name) { super((Pointer)null); allocate(name, op_name); } private native void allocate(@StringPiece BytePointer name, @StringPiece BytePointer op_name); public NodeDefBuilder(@StringPiece String name, @StringPiece String op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/) { super((Pointer)null); allocate(name, op_name, op_registry); } private native void allocate(@StringPiece String name, @StringPiece String op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/); public NodeDefBuilder(@StringPiece String name, @StringPiece String op_name) { super((Pointer)null); allocate(name, op_name); } private native void allocate(@StringPiece String name, @StringPiece String op_name); // REQUIRES: in addition, *op_def must outlive *this. 
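// A rough Java counterpart of the C++ example in the class comment above,
// using the overloads declared in this class (a sketch: the node name, op
// name, and input are illustrative, and DT_FLOAT is the DataType constant
// defined elsewhere in these bindings):
//
//   NodeDef node_def = new NodeDef();
//   Status status = new NodeDefBuilder("my_identity", "Identity")
//       .Input("some_input", 0, DT_FLOAT)
//       .Finalize(node_def);
//   if (!status.ok()) { /* handle error */ }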
public NodeDefBuilder(@StringPiece BytePointer name, @Const OpDef op_def) { super((Pointer)null); allocate(name, op_def); } private native void allocate(@StringPiece BytePointer name, @Const OpDef op_def); public NodeDefBuilder(@StringPiece String name, @Const OpDef op_def) { super((Pointer)null); allocate(name, op_def); } private native void allocate(@StringPiece String name, @Const OpDef op_def); // You must call one Input() function per input_arg in the Op, // *and in the same order as the input_args appear in the OpDef.* // For inputs that take a single tensor. public native @ByRef NodeDefBuilder Input(@StringPiece BytePointer src_node, int src_index, @Cast("tensorflow::DataType") int dt); public native @ByRef NodeDefBuilder Input(@StringPiece String src_node, int src_index, @Cast("tensorflow::DataType") int dt); public native @ByRef NodeDefBuilder Input(@Const @ByRef NodeOut src); // For inputs that take a list of tensors. // To create inputs in tests, see fake_input.h. public native @ByRef NodeDefBuilder Input(@ByVal @Cast("tensorflow::FakeInputFunctor*") Pointer fake_input); // Specify that this node must only run after src_node. public native @ByRef NodeDefBuilder ControlInput(@StringPiece BytePointer src_node); public native @ByRef NodeDefBuilder ControlInput(@StringPiece String src_node); // Constrains what devices this node may be scheduled on. public native @ByRef NodeDefBuilder Device(@StringPiece BytePointer device_spec); public native @ByRef NodeDefBuilder Device(@StringPiece String device_spec); // Sets the attr, if not already set. If already set with a different // value, an error will be returned from Finalize(). public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Const @ByRef AttrValue value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Const @ByRef AttrValue value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @StringPiece BytePointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @StringPiece String value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, int value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, int value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("tensorflow::int64") long value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("tensorflow::int64") long value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, float value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, float value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, double value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, double value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("bool") boolean value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("bool") boolean value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Const @ByRef PartialTensorShape value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Const @ByRef PartialTensorShape value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Const @ByRef Tensor value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Const @ByRef Tensor value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Const @ByRef TensorProto value); public native @ByRef NodeDefBuilder 
Attr(@StringPiece String name, @Const @ByRef TensorProto value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Const @ByRef NameAttrList value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Const @ByRef NameAttrList value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringPieceVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringPieceVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("const char**") @ArraySlice PointerPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("const char**") @ArraySlice @ByPtrPtr ByteBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("const char**") @ArraySlice @ByPtrPtr byte[] value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("const char**") @ArraySlice @ByPtrPtr BytePointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("const char**") @ArraySlice @ByPtrPtr ByteBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("const char**") @ArraySlice @ByPtrPtr byte[] value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice IntPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice IntBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice int... value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice IntPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice IntBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice int... value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("tensorflow::int64*") @ArraySlice LongPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("tensorflow::int64*") @ArraySlice LongBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("tensorflow::int64*") @ArraySlice long... value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("tensorflow::int64*") @ArraySlice LongPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("tensorflow::int64*") @ArraySlice LongBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("tensorflow::int64*") @ArraySlice long... value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice FloatPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice FloatBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice float... 
value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice FloatPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice FloatBuffer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice float... value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @Cast("bool*") @ArraySlice BoolPointer value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @Cast("bool*") @ArraySlice boolean... value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") TensorShapeVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") TensorShapeVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ArraySlice TensorShapeProto value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ArraySlice TensorShapeProto value); public native @ByRef NodeDefBuilder Attr(@StringPiece BytePointer name, @ByVal TensorVector value); public native @ByRef NodeDefBuilder Attr(@StringPiece String name, @ByVal TensorVector value); // Finish building the NodeDef, returning any errors or setting // *node_def if none. // WARNING: Not all problems are detected! The resulting NodeDef may // not be valid! Call ValidateNodeDef() from node_def_utils to be sure. public native @ByVal Status Finalize(NodeDef node_def); // Accessors for the values set in the constructor. public native @StdString BytePointer node_name(); public native @Const @ByRef OpDef op_def(); } // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_NODE_DEF_BUILDER_H_ // Parsed from tensorflow/core/framework/node_def_util.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_NODE_DEF_UTIL_H_ // #define TENSORFLOW_CORE_FRAMEWORK_NODE_DEF_UTIL_H_ // #include // #include // #include "tensorflow/core/framework/attr_value_util.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/lib/gtl/flatmap.h" // #include "tensorflow/core/lib/hash/hash.h" // #include "tensorflow/core/platform/protobuf.h" // We forward declare protos so that kernels don't need to depend on them // Name of the attribute used to encode node colocation constraints. // // Nodes can be co-located on the same device. Desire for explicit co-location // is described by list(string) attribute containing the name of colocation // groups. @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kColocationAttrName(); // String prefix applied to the operation name for colocation constraints. 
@Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kColocationGroupPrefix(); // Produce a human-readable version of a Node or NodeDef that is more concise // than a text-format proto. @Namespace("tensorflow") public static native @StdString BytePointer SummarizeNode(@Const @ByRef Node node); @Namespace("tensorflow") public static native @StdString BytePointer SummarizeNodeDef(@Const @ByRef NodeDef node_def); // Produces a formatted string pattern from the node which can uniquely identify // this node upstream to produce an informative error message. The pattern // followed is: {{node }} @Namespace("tensorflow") public static native @StdString BytePointer FormatNodeForError(@Const @ByRef Node node); @Namespace("tensorflow") public static native @StdString BytePointer FormatNodeDefForError(@Const @ByRef NodeDef node_def); // Adds an attr with name and value to *node_def. // The type of the attr is based on the type of value. @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Const @ByRef AttrValue value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Const @ByRef AttrValue value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @StringPiece BytePointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @StringPiece String value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, int value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, int value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("tensorflow::int64") long value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("tensorflow::int64") long value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, float value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, float value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, double value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, double value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("bool") boolean value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("bool") boolean value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Const @ByRef PartialTensorShape value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Const @ByRef PartialTensorShape value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Const @ByRef Tensor value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Const @ByRef Tensor value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Const @ByRef 
TensorProto value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Const @ByRef TensorProto value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Const @ByRef NameAttrList value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Const @ByRef NameAttrList value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringPieceVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringPieceVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("const char**") @ArraySlice PointerPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("const char**") @ArraySlice @ByPtrPtr ByteBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("const char**") @ArraySlice @ByPtrPtr byte[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("const char**") @ArraySlice @ByPtrPtr BytePointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("const char**") @ArraySlice @ByPtrPtr ByteBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("const char**") @ArraySlice @ByPtrPtr byte[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice IntPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice IntBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice int[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice IntPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice IntBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice int[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("tensorflow::int64*") @ArraySlice LongPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("tensorflow::int64*") @ArraySlice LongBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("tensorflow::int64*") @ArraySlice long[] value, NodeDef node_def); 
@Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("tensorflow::int64*") @ArraySlice LongPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("tensorflow::int64*") @ArraySlice LongBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("tensorflow::int64*") @ArraySlice long[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice FloatPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice FloatBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice float[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice FloatPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice FloatBuffer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice float[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @Cast("bool*") @ArraySlice BoolPointer value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @Cast("bool*") @ArraySlice boolean[] value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") TensorShapeVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") TensorShapeVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ArraySlice TensorShapeProto value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ArraySlice TensorShapeProto value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece BytePointer name, @ByVal TensorVector value, NodeDef node_def); @Namespace("tensorflow") public static native void AddNodeAttr(@StringPiece String name, @ByVal TensorVector value, NodeDef node_def); // Version to workaround C++'s "perfect" forwarding not being able to // forward {...} initialization. // Adds an attr to an attr value map. 
@Namespace("tensorflow") public static native void AddAttr(@StringPiece BytePointer name, @Const @ByRef AttrValue value, @Cast("tensorflow::AttrValueMap*") StringAttrValueMap map); @Namespace("tensorflow") public static native void AddAttr(@StringPiece String name, @Const @ByRef AttrValue value, @Cast("tensorflow::AttrValueMap*") StringAttrValueMap map); @Namespace("tensorflow") public static native void AddAttr(@StringPiece BytePointer name, @Cast("bool") boolean value, @Cast("tensorflow::AttrValueMap*") StringAttrValueMap map); @Namespace("tensorflow") public static native void AddAttr(@StringPiece String name, @Cast("bool") boolean value, @Cast("tensorflow::AttrValueMap*") StringAttrValueMap map); @Namespace("tensorflow") @NoOffset public static class AttrSlice extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AttrSlice(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public AttrSlice(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AttrSlice position(long position) { return (AttrSlice)super.position(position); } public AttrSlice(@Const @ByRef NodeDef node_def) { super((Pointer)null); allocate(node_def); } private native void allocate(@Const @ByRef NodeDef node_def); // NOLINT(runtime/explicit) public AttrSlice() { super((Pointer)null); allocate(); } private native void allocate(); // Empty public AttrSlice(@Cast("const tensorflow::AttrValueMap*") StringAttrValueMap a) { super((Pointer)null); allocate(a); } private native void allocate(@Cast("const tensorflow::AttrValueMap*") StringAttrValueMap a); public native int size(); // Returns the attr with attr_name if found. Otherwise, returns // nullptr. public native @Const AttrValue Find(@StringPiece BytePointer attr_name); public native @Const AttrValue Find(@StringPiece String attr_name); // Returns the attr_value for attr_name if found. Otherwise, returns a // NotFound status. public native @ByVal Status Find(@StringPiece BytePointer attr_name, @Cast("const tensorflow::AttrValue**") PointerPointer attr_value); public native @ByVal Status Find(@StringPiece BytePointer attr_name, @Const @ByPtrPtr AttrValue attr_value); public native @ByVal Status Find(@StringPiece String attr_name, @Const @ByPtrPtr AttrValue attr_value); // Helper class to avoid allocations in EqualAttrs. // TODO(irving): Will go away once NodeInfo is used. public static class Scratch extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Scratch() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Scratch(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Scratch(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Scratch position(long position) { return (Scratch)super.position(position); } public native @StdString BytePointer a(); public native Scratch a(BytePointer a); public native @StdString BytePointer b(); public native Scratch b(BytePointer b); } // Check if all attrs and attr values match. Does not take defaults into // account. // // TODO(irving): There is a bug in this routine inherited from its // OptimizerCSE::EqualAttrs precedecessor. 
The same tensor attr can be // represented in more than one way as an AttrValue, since TensorProto is // not 1-1. This bug will go away once I replace everything with NodeInfo, // which stores a Tensor object directly. The Scratch object will also go // away. public native @Cast("bool") boolean EqualAttrs(@ByVal AttrSlice other, Scratch scratch); // If this AttrSlice has an attached NodeDef, summarize it. This is for // error messages only: we intentionally do not provide direct access to the // NodeDef, since it is not always there. public native @StdString BytePointer SummarizeNode(); // Iteration over all attrs } // Return true if the attr with the name attr_name is defined in node_def. @Namespace("tensorflow") public static native @Cast("bool") boolean HasNodeAttr(@Const @ByRef NodeDef node_def, @StringPiece BytePointer attr_name); @Namespace("tensorflow") public static native @Cast("bool") boolean HasNodeAttr(@Const @ByRef NodeDef node_def, @StringPiece String attr_name); // Look up the attr with name attr_name and set *value to its value. If no // attr with attr_name is found in node_def, or the attr does not have // a matching type, a non-ok status will be returned. @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @StdString @Cast({"char*", "std::string*"}) BytePointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @StdString @Cast({"char*", "std::string*"}) BytePointer value); // type: "string" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Cast("tensorflow::int64*") LongPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @Cast("tensorflow::int64*") LongBuffer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Cast("tensorflow::int64*") long... value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @Cast("tensorflow::int64*") LongPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Cast("tensorflow::int64*") LongBuffer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @Cast("tensorflow::int64*") long... value); // type: "int" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, IntPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, IntBuffer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, int... 
value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, IntPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, IntBuffer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, int... value); // type: "int" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, FloatPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, FloatBuffer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, float... value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, FloatPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, FloatBuffer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, float... value); // type: "float" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Cast("bool*") BoolPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @Cast("bool*") boolean... value); // type: "bool" // type: "type" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, TensorShapeProto value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, TensorShapeProto value); // type: "shape" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, TensorShape value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, TensorShape value); // type: "shape" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, PartialTensorShape value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, PartialTensorShape value); // type: "shape" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, Tensor value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, Tensor value); // type: "tensor" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, StringVector value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, StringVector value); // type "list(string)" // type "list(int)" // type "list(int)" // type "list(float)" // type 
"list(bool)" // type "list(type)" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, DataTypeVector value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, DataTypeVector value); // type "list(type)" // type "list(shape)" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, TensorShapeVector value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, TensorShapeVector value); // type "list(shape)" // type "list(shape)" @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, TensorVector value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, TensorVector value); // type: "list(tensor)" // This version avoids copying the TensorProto. // REQUIRES: Must not use *value beyond the lifetime of node_def. @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Cast("const tensorflow::TensorProto**") PointerPointer value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Const @ByPtrPtr TensorProto value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @Const @ByPtrPtr TensorProto value); // type: "tensor" // This version avoids copying the NameAttrList. // REQUIRES: Must not use *value beyond the lifetime of node_def. @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @Const @ByPtrPtr NameAttrList value); @Namespace("tensorflow") public static native @ByVal Status GetNodeAttr(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @Const @ByPtrPtr NameAttrList value); // type: "func" // These versions copies the NameAttrList(s). // type: "func" // type: "list(func)" // Look up the attr with name attr_name and set *value to its value. If no // attr with attr_name is found in node_def, or the attr does not have // a matching type, false is returned. @Namespace("tensorflow") public static native @Cast("bool") boolean GetNodeAttrSimple(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, @StdString @Cast({"char*", "std::string*"}) BytePointer value); @Namespace("tensorflow") public static native @Cast("bool") boolean GetNodeAttrSimple(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, @StdString @Cast({"char*", "std::string*"}) BytePointer value); // type: "string" @Namespace("tensorflow") public static native @Cast("bool") boolean GetNodeAttrSimple(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name, StringVector value); @Namespace("tensorflow") public static native @Cast("bool") boolean GetNodeAttrSimple(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name, StringVector value); // type: "string" // Look up the attr with name attr_name and return a reference to its value. 
// If no attr with attr_name is found in node_def, or the attr does not have // a matching type, a reference to an empty string is returned. // REQUIRES: Must not use the returned value beyond the lifetime of node_def. @Namespace("tensorflow") public static native @StdString BytePointer GetNodeAttrString(@Const @ByRef AttrSlice attrs, @StringPiece BytePointer attr_name); @Namespace("tensorflow") public static native @StdString String GetNodeAttrString(@Const @ByRef AttrSlice attrs, @StringPiece String attr_name); // Computes the input type for a specific node input. // REQUIRES: ValidateOpDef(op_def).ok() @Namespace("tensorflow") public static native @ByVal Status InputTypeForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, int input_port, @Cast("tensorflow::DataType*") IntPointer input_type); // Computes the input types for a specific node. // REQUIRES: ValidateOpDef(op_def).ok() @Namespace("tensorflow") public static native @ByVal Status InputTypesForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, DataTypeVector inputs); // Computes the output type for a specific node output. // REQUIRES: ValidateOpDef(op_def).ok() @Namespace("tensorflow") public static native @ByVal Status OutputTypeForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, int output_port, @Cast("tensorflow::DataType*") IntPointer output_type); // Computes the output types for a specific node. // REQUIRES: ValidateOpDef(op_def).ok() @Namespace("tensorflow") public static native @ByVal Status OutputTypesForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, DataTypeVector outputs); // Computes the input and output types for a specific node. // REQUIRES: ValidateOpDef(op_def).ok() @Namespace("tensorflow") public static native @ByVal Status InOutTypesForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, DataTypeVector inputs, DataTypeVector outputs); // Computes the number of outputs for a specific node. // REQUIRES: ValidateOpDef(op_def).ok() @Namespace("tensorflow") public static native @ByVal Status NumOutputsForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, IntPointer num_outputs); @Namespace("tensorflow") public static native @ByVal Status NumOutputsForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, IntBuffer num_outputs); @Namespace("tensorflow") public static native @ByVal Status NumOutputsForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, int... num_outputs); // Validates that the NodeDef: // * Defines all expected attrs from the OpDef. // * All attrs satisfies constraints from the OpDef. // * Has a signature matching SignatureForNode(). // etc. @Namespace("tensorflow") public static native @ByVal Status ValidateNodeDef(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def); // Computes the mapping from input/output argument name to the // corresponding input/output index range. For example, // input "foo" corresponds to input indices // [ (*inputs)["foo"].first, (*inputs)["foo"].second ). // NOTE(mrry): To reduce allocations when the map is used and save // space, the returned `NameRangeMap` objects borrow the input/output // argument names from `op_def`. The `op_def` must outlive the // returned `NameRangeMap` objects. 
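// Usage sketch for the name-range lookup declared below (assumes
// NameRangeMap is mapped as a default-constructible type in these bindings;
// node_def and op_def are illustrative):
//
//   NameRangeMap inputs = new NameRangeMap();
//   NameRangeMap outputs = new NameRangeMap();
//   Status s = NameRangesForNode(node_def, op_def, inputs, outputs);
//   // On success, each map entry holds the [start, end) index range for
//   // the input/output argument with that name.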
@Namespace("tensorflow") public static native @ByVal Status NameRangesForNode(@Const @ByRef NodeDef node_def, @Const @ByRef OpDef op_def, NameRangeMap inputs, NameRangeMap outputs); @Namespace("tensorflow") public static native @ByVal Status NameRangesForNode(@Const @ByRef Node node, @Const @ByRef OpDef op_def, NameRangeMap inputs, NameRangeMap outputs); // Adds default values to *node_def for unspecified attrs from op_def. @Namespace("tensorflow") public static native void AddDefaultsToNodeDef(@Const @ByRef OpDef op_def, NodeDef node_def); // Validates the syntax of a NodeDef provided externally. // // The following is an EBNF-style syntax for NodeDef objects. Note that // Node objects are actually specified as tensorflow::NodeDef protocol buffers, // which contain many other fields that are not (currently) validated. // // Node = NodeName, Inputs // Inputs = ( DataInput * ), ( ControlInput * ) // DataInput = NodeName, ( ":", [1-9], [0-9] * ) ? // ControlInput = "^", NodeName // NodeName = [A-Za-z0-9.], [A-Za-z0-9_./] * @Namespace("tensorflow") public static native @ByVal Status ValidateExternalNodeDefSyntax(@Const @ByRef NodeDef node_def); // Returns "status" with kernel's NodeDef attached as additional text // in the error message. @Namespace("tensorflow") public static native @ByVal Status AttachDef(@Const @ByRef Status status, @Const @ByRef NodeDef node_def); @Namespace("tensorflow") public static native @ByVal Status AttachDef(@Const @ByRef Status status, @Const @ByRef Node node); // Appends the given prefix and suffix to the original node name in order to // make the name unique. If it's an "Enter" node, use the same way to reset // attribute "frame_name". @Namespace("tensorflow") public static native @ByVal Status AddPrefixAndSuffixToNode(@StringPiece BytePointer prefix, @StringPiece BytePointer suffix, NodeDef node_def); @Namespace("tensorflow") public static native @ByVal Status AddPrefixAndSuffixToNode(@StringPiece String prefix, @StringPiece String suffix, NodeDef node_def); // namespace tensorflow // #endif // TENSORFLOW_CORE_FRAMEWORK_NODE_DEF_UTIL_H_ // Parsed from tensorflow/core/framework/selective_registration.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_FRAMEWORK_SELECTIVE_REGISTRATION_H_ // #define TENSORFLOW_CORE_FRAMEWORK_SELECTIVE_REGISTRATION_H_ // #include // #ifdef SELECTIVE_REGISTRATION // Experimental selective registration support to reduce binary size. // // To use selective registration, when building: // 1. define SELECTIVE_REGISTRATION, e.g. in gcc by passing // -DSELECTIVE_REGISTRATION to compilation. // 2. Provide ops_to_register.h. This file is not included in the repo and must // be placed by the user or a tool where the compiler can find it. It must // define the constants and functions used in the macros below. 
The // functions should be defined as valid constexpr functions, so that they are // evaluated at compile time: this is needed to make symbols referenced by // un-registered objects unused, and therefore allow the linker to strip them // out. See python/tools/print_selective_registration_header.py for a tool // that can be used to generate ops_to_register.h. // // ops_to_register.h should define macros for: // // Ops for which this is false will not be registered. // SHOULD_REGISTER_OP(op) // // If this is false, then no gradient ops are registered. // SHOULD_REGISTER_OP_GRADIENT // // Op kernel classes where this is false won't be registered. // SHOULD_REGISTER_OP_KERNEL(clz) // The macros should be defined using constexprs. // #include "ops_to_register.h" // #if (!defined(SHOULD_REGISTER_OP) || !defined(SHOULD_REGISTER_OP_GRADIENT) || // !defined(SHOULD_REGISTER_OP_KERNEL)) // #endif // #else // #define SHOULD_REGISTER_OP(op) true // #define SHOULD_REGISTER_OP_GRADIENT true // #define SHOULD_REGISTER_OP_KERNEL(clz) true // #endif // #endif // TENSORFLOW_CORE_FRAMEWORK_SELECTIVE_REGISTRATION_H_ // Parsed from tensorflow/core/graph/node_builder.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_GRAPH_NODE_BUILDER_H_ // #define TENSORFLOW_CORE_GRAPH_NODE_BUILDER_H_ // #include // #include "tensorflow/core/framework/node_def_builder.h" // #include "tensorflow/core/framework/op.h" // #include "tensorflow/core/framework/op_def.pb.h" // #include "tensorflow/core/graph/graph.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/lib/gtl/array_slice.h" // This is a helper for creating a Node and adding it to a Graph. // Internally, it uses a NodeDefBuilder to automatically set attrs // that can be inferred from the inputs, and use default values // (where they exist) for unspecified attrs. Example usage: // // Node* node; // Status status = NodeBuilder(node_name, op_name) // .Input(...) // .Attr(...) // .Finalize(&graph, &node); // if (!status.ok()) return status; // // Use node here. @Namespace("tensorflow") @NoOffset public static class NodeBuilder extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NodeBuilder(Pointer p) { super(p); } // For specifying the output of a Node to provide to one of the Input() // functions below. It supports both regular inputs (where you are // connecting to an existing Node*), and inputs from outside the graph // (or haven't been added to the graph yet, like back edges, where // you don't have a Node*). Both types can be mixed, e.g. in an // ArraySlice. @NoOffset public static class NodeOut extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NodeOut(Pointer p) { super(p); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public NodeOut(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public NodeOut position(long position) { return (NodeOut)super.position(position); } // For referencing an existing Node. public NodeOut(Node n, int i/*=0*/) { super((Pointer)null); allocate(n, i); } private native void allocate(Node n, int i/*=0*/); public NodeOut(Node n) { super((Pointer)null); allocate(n); } private native void allocate(Node n); // For referencing Nodes not in the graph being built. It is // useful when preparing a graph for ExtendSession or creating a // back edge to a node that hasn't been added to the graph yet, // but will be. public NodeOut(@StringPiece BytePointer name, int i, @Cast("tensorflow::DataType") int t) { super((Pointer)null); allocate(name, i, t); } private native void allocate(@StringPiece BytePointer name, int i, @Cast("tensorflow::DataType") int t); public NodeOut(@StringPiece String name, int i, @Cast("tensorflow::DataType") int t) { super((Pointer)null); allocate(name, i, t); } private native void allocate(@StringPiece String name, int i, @Cast("tensorflow::DataType") int t); // Default constructor for std::vector. public NodeOut() { super((Pointer)null); allocate(); } private native void allocate(); public native Node node(); public native NodeOut node(Node node); // error is set to true if: // * the NodeOut was default constructed and never overwritten, // * a nullptr Node* was passed to the NodeOut constructor, or // * an out-of-range index was passed to the NodeOut constructor. public native @Cast("bool") boolean error(); public native NodeOut error(boolean error); public native @StdString BytePointer name(); public native NodeOut name(BytePointer name); public native int index(); public native NodeOut index(int index); public native @Cast("tensorflow::DataType") int dt(); public native NodeOut dt(int dt); } // Specify the name and the Op (either via an OpDef or the name of // the Op plus a registry) for the Node. Other fields are // specified by calling the methods below. // REQUIRES: The OpDef must satisfy ValidateOpDef(). 
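// A rough Java counterpart of the C++ example in the class comment above
// (a sketch: `graph` is an existing Graph, `src_node` an existing Node in
// it, and passing a null-initialized Node as the @ByPtrPtr out-parameter of
// Finalize() is assumed to follow the usual JavaCPP convention):
//
//   Node node = new Node(null);
//   Status status = new NodeBuilder("my_identity", "Identity")
//       .Input(src_node, 0)
//       .Finalize(graph, node);
//   if (!status.ok()) { /* handle error */ }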
public NodeBuilder(@StringPiece BytePointer name, @StringPiece BytePointer op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/) { super((Pointer)null); allocate(name, op_name, op_registry); } private native void allocate(@StringPiece BytePointer name, @StringPiece BytePointer op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/); public NodeBuilder(@StringPiece BytePointer name, @StringPiece BytePointer op_name) { super((Pointer)null); allocate(name, op_name); } private native void allocate(@StringPiece BytePointer name, @StringPiece BytePointer op_name); public NodeBuilder(@StringPiece String name, @StringPiece String op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/) { super((Pointer)null); allocate(name, op_name, op_registry); } private native void allocate(@StringPiece String name, @StringPiece String op_name, @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/); public NodeBuilder(@StringPiece String name, @StringPiece String op_name) { super((Pointer)null); allocate(name, op_name); } private native void allocate(@StringPiece String name, @StringPiece String op_name); public NodeBuilder(@StringPiece BytePointer name, @Const OpDef op_def) { super((Pointer)null); allocate(name, op_def); } private native void allocate(@StringPiece BytePointer name, @Const OpDef op_def); public NodeBuilder(@StringPiece String name, @Const OpDef op_def) { super((Pointer)null); allocate(name, op_def); } private native void allocate(@StringPiece String name, @Const OpDef op_def); // Create a NodeBuilder from an existing NodeDefBuilder. public NodeBuilder(@Const @ByRef NodeDefBuilder def_builder) { super((Pointer)null); allocate(def_builder); } private native void allocate(@Const @ByRef NodeDefBuilder def_builder); // You must call one Input() function per input_arg in the Op, // *and in the same order as the input_args appear in the OpDef.* // For inputs that take a single tensor. public native @ByRef NodeBuilder Input(Node src_node, int src_index/*=0*/); public native @ByRef NodeBuilder Input(Node src_node); public native @ByRef NodeBuilder Input(@ByVal NodeOut src); // For inputs that take a list of tensors. // Require that this node run after src_node(s). public native @ByRef NodeBuilder ControlInput(Node src_node); public native @ByRef NodeBuilder ControlInputs(@ByVal NodeVector src_nodes); // Sets the "requested device spec" in the NodeDef (not the // "assigned device" in the Node). public native @ByRef NodeBuilder Device(@StringPiece BytePointer device_spec); public native @ByRef NodeBuilder Device(@StringPiece String device_spec); // Sets the device name in the "assigned device" field in tensorflow::Node. public native @ByRef NodeBuilder AssignedDevice(@StringPiece BytePointer device); public native @ByRef NodeBuilder AssignedDevice(@StringPiece String device); // Set the value of an attr. attr_name must match the name of one of // attrs defined by the Op, and value must have the corresponding type // (see SetAttrValue() in ../framework/attr_value_util.h for legal // types for value). Note that attrs will be set automatically if // they can be determined by the inputs. // Validates the described node and adds it to *graph, adding edges // for all (non-back) inputs. If created_node is not nullptr, // *created_node will be set to the new node (or nullptr on error). 
public native @ByVal Status Finalize(Graph graph, @Cast("tensorflow::Node**") PointerPointer created_node); public native @ByVal Status Finalize(Graph graph, @ByPtrPtr Node created_node); // Accessors for the values set in the constructor. public native @StdString BytePointer node_name(); public native @Const @ByRef OpDef op_def(); } // IMPLEMENTATION ------------------------------------------------------------- // namespace tensorflow // #endif // TENSORFLOW_CORE_GRAPH_NODE_BUILDER_H_ // Parsed from tensorflow/core/graph/graph_def_builder.h /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_DEF_BUILDER_H_ // #define TENSORFLOW_CORE_GRAPH_GRAPH_DEF_BUILDER_H_ // #include // #include "tensorflow/core/framework/graph.pb.h" // #include "tensorflow/core/framework/op.h" // #include "tensorflow/core/graph/graph.h" // #include "tensorflow/core/graph/node_builder.h" // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/lib/core/stringpiece.h" // #include "tensorflow/core/lib/gtl/array_slice.h" // Given a function like: // namespace ops { // Node* Identity(NodeOut input, const GraphDefBuilder::Options& opts) { // if (opts.HaveError()) return nullptr; // static const string kOpName = "Identity"; // NodeBuilder node_builder(opts.GetNameForOp(kOpName), kOpName, // opts.op_registry()); // node_builder.Input(input); // return opts.FinalizeBuilder(&node_builder); // } // } // namespace ops // // // Or, alternatively: // namespace ops { // Node* Identity(NodeOut input, const GraphDefBuilder::Options& opts) { // static const string kOpName = "Identity"; // return UnaryOp(kOpName, input, opts); // } // } // namespace ops // // You call it like: // GraphDefBuilder b; // using namespace ::tensorflow::ops; // NOLINT(build/namespaces) // Node* na = Const(7, b.opts()); // // Note: WithName() returns a copy, opts is unchanged. // Node* nb = Const(5, b.opts().WithName("control-input")); // Node* nc = Identity(na, b.opts().WithControlInput(nb)); // GraphDef graph_def; // Status status = b.ToGraphDef(&graph_def); // if (!status.ok()) { /* Handle error */ } // // In tests you can skip the status handling via: // GraphDefBuilder b(GraphDefBuilder::kFailImmediately); // ... // b.ToGraphDef(&graph_def); @Namespace("tensorflow") @NoOffset public static class GraphDefBuilder extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GraphDefBuilder(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public GraphDefBuilder(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public GraphDefBuilder position(long position) { return (GraphDefBuilder)super.position(position); } // Options for adding a Node to a Graph. 
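// Sketch of using Options directly rather than through b.opts() as in the
// C++ example above (`graph` and `status` are illustrative; note that the
// With*() methods return a modified copy and leave the receiver unchanged):
//
//   GraphDefBuilder.Options opts = new GraphDefBuilder.Options(graph, status);
//   GraphDefBuilder.Options named = opts.WithName("my_node");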
    @NoOffset public static class Options extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Options(Pointer p) { super(p); }

        // Sets the Graph (that Nodes will be added to) and the status. The
        // status may be set to nullptr, in which case errors cause CHECK
        // failures. The graph and status must outlive *this.
        public Options(Graph graph, Status status) { super((Pointer)null); allocate(graph, status); }
        private native void allocate(Graph graph, Status status);

        // Methods for setting options. These are const methods: they
        // return a copy of *this with the option set.
        public native @ByVal Options WithName(@StringPiece BytePointer name);
        public native @ByVal Options WithName(@StringPiece String name);
        public native @ByVal Options WithDevice(@StringPiece BytePointer device);
        public native @ByVal Options WithDevice(@StringPiece String device);
        public native @ByVal Options WithControlInput(Node control_input);
        public native @ByVal Options WithControlInputs(@ByVal NodeVector control_inputs);

        // Override the default value for an optional attr.
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, int value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, int value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice IntPointer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice IntBuffer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice int... value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice IntPointer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice IntBuffer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice int... value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @Cast("long long") long value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @Cast("long long") long value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @Cast("long long*") @ArraySlice LongPointer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @Cast("long long*") @ArraySlice LongBuffer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @Cast("long long*") @ArraySlice long... value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @Cast("long long*") @ArraySlice LongPointer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @Cast("long long*") @ArraySlice LongBuffer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @Cast("long long*") @ArraySlice long... value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, float value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, float value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice FloatPointer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice FloatBuffer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice float... value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice FloatPointer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice FloatBuffer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice float... value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, double value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, double value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice DoublePointer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice DoubleBuffer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice double... value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice DoublePointer value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ArraySlice DoubleBuffer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ArraySlice double... value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @Cast("bool") boolean value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @Cast("bool") boolean value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @Cast("bool*") @ArraySlice BoolPointer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @Cast("bool*") @ArraySlice boolean... value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @StdString BytePointer value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @StdString String value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") StringVector value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal Tensor value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal Tensor value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal TensorVector value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal TensorVector value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal TensorProto value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal TensorProto value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal TensorProtoVector value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal TensorProtoVector value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal TensorShape value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal TensorShape value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") TensorShapeVector value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal @Cast("tensorflow::gtl::ArraySlice*") TensorShapeVector value);
        public native @ByVal Options WithAttr(@StringPiece BytePointer attr_name, @ByVal NameAttrList value);
        public native @ByVal Options WithAttr(@StringPiece String attr_name, @ByVal NameAttrList value);
        // Note: overload needed to allow {...} expressions for value.
        // Methods for using options from a function that creates a Node.

        // Returns true if the status associated with *this has an error.
        // Use this to skip processing that may depend on prior results.
        public native @Cast("bool") boolean HaveError();

        // Returns a string representation of the status associated with *this.
        // Returns the string `"OK"` if the status doesn't have any error.
        public native @StdString BytePointer StatusToString();

        // Given the Op type name, return a name for a node of that type.
        // Uses the value set in WithName() if that has been called. Otherwise,
        // returns a name built out of the Op type name.
        public native @StdString BytePointer GetNameForOp(@StringPiece BytePointer op);
        public native @StdString String GetNameForOp(@StringPiece String op);

        // Sets the device, adds control inputs, adds attrs, and calls Finalize().
        // If Finalize returns an error, it is saved and this function returns
        // nullptr.
        public native Node FinalizeBuilder(NodeBuilder builder);

        // Updates the associated status, if any, or calls TF_CHECK_OK if none.
        public native void UpdateStatus(@Const @ByRef Status status);

        // Accessor
        public native @Const OpRegistryInterface op_registry();
    }

    // Start building a new graph.
    public GraphDefBuilder(
            @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/) { super((Pointer)null); allocate(op_registry); }
    private native void allocate(
            @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/);
    public GraphDefBuilder() { super((Pointer)null); allocate(); }
    private native void allocate();

    // For use in tests, where you want to fail immediately on error instead
    // of checking the status at the end.
    /** enum tensorflow::GraphDefBuilder::TestFailImmediatelyType */
    public static final int kFailImmediately = 0;
    public GraphDefBuilder(
            @Cast("tensorflow::GraphDefBuilder::TestFailImmediatelyType") int arg0,
            @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/) { super((Pointer)null); allocate(arg0, op_registry); }
    private native void allocate(
            @Cast("tensorflow::GraphDefBuilder::TestFailImmediatelyType") int arg0,
            @Const OpRegistryInterface op_registry/*=tensorflow::OpRegistry::Global()*/);
    public GraphDefBuilder(
            @Cast("tensorflow::GraphDefBuilder::TestFailImmediatelyType") int arg0) { super((Pointer)null); allocate(arg0); }
    private native void allocate(
            @Cast("tensorflow::GraphDefBuilder::TestFailImmediatelyType") int arg0);

    // Gets the Options with the associated Graph and Status.
    public native @Const @ByRef Options opts();

    // Once all the nodes have been added, call this to get whether it was
    // successful, and if so fill *graph_def.
    public native @ByVal Status ToGraphDef(GraphDef graph_def);

    // Adds the function and gradient definitions in `fdef_lib` to this graph's op
    // registry. Ignores duplicate functions, and returns a bad status if an
    // imported function differs from an existing function or op with the same
    // name.
    public native @ByVal Status AddFunctionLibrary(@Const @ByRef FunctionDefLibrary fdef_lib);

    // Returns whether a user-defined function with `name` already exists in the
    // graph.
    public native @Cast("bool") boolean HasFunction(@StdString BytePointer name);
    public native @Cast("bool") boolean HasFunction(@StdString String name);
}
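// Example (not part of the generated bindings): a minimal Java sketch of the
// GraphDefBuilder workflow, mirroring the C++ usage comment above. It assumes
// the usual static import of org.bytedeco.javacpp.tensorflow.*; "NoOp" is just
// an illustrative zero-input op and the node name "init" is arbitrary.
//
//   GraphDefBuilder b = new GraphDefBuilder();
//   // SourceOp() (declared below) adds a zero-input op; b.opts() carries the
//   // Graph and Status shared by all nodes added through this builder.
//   Node n = SourceOp("NoOp", b.opts().WithName("init"));
//   GraphDef graph_def = new GraphDef();
//   Status s = b.ToGraphDef(graph_def);  // fills graph_def on success
//   if (!s.ok()) { /* handle error, e.g. via s.error_message() */ }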
// A NodeOut may either be a regular input or back input. Regular
// inputs are specified via either a Node* or a Node* and an output
// index. Back inputs are specified by a node name, output index, and
// output type.

// For adding an Op with no inputs to a GraphDefBuilder.
@Namespace("tensorflow::ops") public static native Node SourceOp(@StdString BytePointer op_name, @Const @ByRef GraphDefBuilder.Options opts);
@Namespace("tensorflow::ops") public static native Node SourceOp(@StdString String op_name, @Const @ByRef GraphDefBuilder.Options opts);

// For adding an Op with one input to a GraphDefBuilder.
@Namespace("tensorflow::ops") public static native Node UnaryOp(@StdString BytePointer op_name, @ByVal NodeBuilder.NodeOut input, @Const @ByRef GraphDefBuilder.Options opts);
@Namespace("tensorflow::ops") public static native Node UnaryOp(@StdString String op_name, Node input, @Const @ByRef GraphDefBuilder.Options opts);

// For adding an Op with two inputs to a GraphDefBuilder.
@Namespace("tensorflow::ops") public static native Node BinaryOp(@StdString BytePointer op_name, @ByVal NodeBuilder.NodeOut a, @ByVal NodeBuilder.NodeOut b, @Const @ByRef GraphDefBuilder.Options opts);
@Namespace("tensorflow::ops") public static native Node BinaryOp(@StdString String op_name, Node a, Node b, @Const @ByRef GraphDefBuilder.Options opts);

// namespace ops
// namespace tensorflow

// #endif  // TENSORFLOW_CORE_GRAPH_GRAPH_DEF_BUILDER_H_


// Parsed from tensorflow/core/graph/default_device.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_GRAPH_DEFAULT_DEVICE_H_
// #define TENSORFLOW_CORE_GRAPH_DEFAULT_DEVICE_H_

// #include
// #include "tensorflow/core/framework/graph.pb.h"
// #include "tensorflow/core/framework/node_def.pb.h"

// Sets the default device for all nodes in graph_def to "device",
// only if not already set.
@Namespace("tensorflow::graph") public static native void SetDefaultDevice(@StdString BytePointer device, GraphDef graph_def);
@Namespace("tensorflow::graph") public static native void SetDefaultDevice(@StdString String device, GraphDef graph_def);

// namespace graph
// namespace tensorflow

// #endif  // TENSORFLOW_CORE_GRAPH_DEFAULT_DEVICE_H_
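// Example (not part of the generated bindings): a short sketch of
// SetDefaultDevice(); the device string "/cpu:0" is only illustrative.
//
//   GraphDef graph_def = new GraphDef();
//   // ... populate graph_def, e.g. via GraphDefBuilder.ToGraphDef() above ...
//   // Nodes whose device field is empty are assigned "/cpu:0"; nodes that
//   // already specify a device are left untouched.
//   SetDefaultDevice("/cpu:0", graph_def);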
// Parsed from tensorflow/core/graph/graph_constructor.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_CONSTRUCTOR_H_
// #define TENSORFLOW_CORE_GRAPH_GRAPH_CONSTRUCTOR_H_

// #include "tensorflow/core/framework/graph.pb.h"
// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/graph/tensor_id.h"
// #include "tensorflow/core/lib/core/status.h"

// Construct a Graph *g out of a GraphDef gdef. Returns non-OK on
// error, in which case *g is left in an incomplete state.
//
// *g is expected to be an empty graph (with no more than source and sink
// nodes) when provided to ConvertGraphDefToGraph. To enhance an existing
// Graph, see ImportGraphDef.
@Namespace("tensorflow") @NoOffset public static class GraphConstructorOptions extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GraphConstructorOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public GraphConstructorOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public GraphConstructorOptions position(long position) {
        return (GraphConstructorOptions)super.position(position);
    }
    public GraphConstructorOptions() { super((Pointer)null); allocate(); }
    private native void allocate();

    // If true, allows internal ops in the GraphDef.
    public native @Cast("bool") boolean allow_internal_ops(); public native GraphConstructorOptions allow_internal_ops(boolean allow_internal_ops);

    // If true, the graph def is expected to have fully specified
    // devices for all nodes. A node in the resulting graph "g" has the
    // device name set accordingly.
    //
    // TODO(zhifengc): if possible, consider removing this option.
    public native @Cast("bool") boolean expect_device_spec(); public native GraphConstructorOptions expect_device_spec(boolean expect_device_spec);
}
@Namespace("tensorflow") public static native @ByVal Status ConvertGraphDefToGraph(@Const @ByRef GraphConstructorOptions opts,
                             @Const @ByRef GraphDef gdef, Graph g);

// Same as ConvertGraphDefToGraph, but takes just nodes. Used by function
// instantiation.
// TODO(irving): This will turn into std::vector soon.
@Namespace("tensorflow") public static native @ByVal Status ConvertNodeDefsToGraph(@Const @ByRef GraphConstructorOptions opts,
                              @ArraySlice NodeDef nodes, Graph g);

// Options for calling ImportGraphDef().
@Namespace("tensorflow") @NoOffset public static class ImportGraphDefOptions extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ImportGraphDefOptions(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ImportGraphDefOptions(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public ImportGraphDefOptions position(long position) {
        return (ImportGraphDefOptions)super.position(position);
    }
    public ImportGraphDefOptions() { super((Pointer)null); allocate(); }
    private native void allocate();
    // Name prefix to use for nodes imported from the GraphDef. For example, if
    // prefix="animals" and GraphDef contains a node "bunny" then the node will be
    // named "animals/bunny" in *g. Must not already be in use as a node name or
    // prefix in the graph.
    public native @StdString BytePointer prefix(); public native ImportGraphDefOptions prefix(BytePointer prefix);

    // If true, imported node names will be modified if their name already exists
    // in the graph. If false, conflicting names will be treated as an error. Note
    // that this option has no effect if `prefix` is specified, since `prefix`
    // will guarantee all node names are unique.
    public native @Cast("bool") boolean uniquify_names(); public native ImportGraphDefOptions uniquify_names(boolean uniquify_names);

    // If true, `prefix` will be modified if it already exists as a node name or
    // prefix in the graph. If false, a conflicting prefix will be treated as an
    // error. This option has no effect if `prefix` isn't specified.
    public native @Cast("bool") boolean uniquify_prefix(); public native ImportGraphDefOptions uniquify_prefix(boolean uniquify_prefix);

    // Maps tensors in `gdef` to existing tensors in `g`. Inputs in `gdef`
    // corresponding to `input_map` keys will be remapped to the nodes in `g`
    // corresponding to the values.
    //
    // Keys should not include `prefix`, i.e., a key ID's name should be the name
    // as it originally appears in `gdef`.
    //
    // If this is non-empty, ImportGraphDef must be called with the shape refiner
    // used to create the existing nodes referenced in `input_map`.
    // TODO(skyewm): can we remove this requirement? How do we access the original
    // shape refiner?
    public native @ByRef SafeTensorIdTensorIdMap input_map(); public native ImportGraphDefOptions input_map(SafeTensorIdTensorIdMap input_map);

    // If true, nodes that will have all output edges removed because of
    // overrides in `input_map` will not be imported.
    public native @Cast("bool") boolean skip_mapped_nodes(); public native ImportGraphDefOptions skip_mapped_nodes(boolean skip_mapped_nodes);

    // The names of existing nodes in `g` that the imported graph should have
    // control dependencies on.
    //
    // Note that to avoid creating many redundant control edges, ImportGraphDef()
    // won't add control edges to nodes that will inherit the dependencies from
    // other nodes in `gdef`.
    public native @ByRef StringVector control_dependencies(); public native ImportGraphDefOptions control_dependencies(StringVector control_dependencies);

    // Tensors in `gdef` that will be returned via the ImportGraphDefResults
    // output parameter of `ImportGraphDef()`. If this list is non-empty, the
    // caller must pass a results object to `ImportGraphDef()`. The
    // `return_tensors` field will be populated with the imported nodes in `g`.
    //
    // Entries should not include `prefix`, i.e., each ID's name should be the
    // name as it originally appears in `gdef`.
    //
    // If this contains a tensor that's also being remapped via `input_map`, the
    // corresponding existing tensor in `g` will be returned.
    public native @StdVector SafeTensorId return_tensors(); public native ImportGraphDefOptions return_tensors(SafeTensorId return_tensors);

    // The names of nodes in `gdef` that will be returned via the
    // ImportGraphDefResults output parameter of `ImportGraphDef()`. If this list
    // is non-empty, the caller must pass a results object to
    // `ImportGraphDef()`. The `return_nodes` field will be populated with the
    // imported nodes in `g`.
    //
    // Entries should not include `prefix`, i.e., each node's name should be the
    // name as it originally appears in `gdef`.
    //
    // Unlike `return_tensors`, `input_map` has no effect on the nodes
    // returned. `return_nodes` must be empty if `skip_mapped_nodes` is true.
    // TODO(skyewm): make this work with `skip_mapped_nodes` if there's a need.
    public native @ByRef StringVector return_nodes(); public native ImportGraphDefOptions return_nodes(StringVector return_nodes);

    // If true, checks that all colocation constraints are nodes in the GraphDef.
    public native @Cast("bool") boolean validate_colocation_constraints(); public native ImportGraphDefOptions validate_colocation_constraints(boolean validate_colocation_constraints);

    // If false, skips shape validation.
    public native @Cast("bool") boolean validate_shape(); public native ImportGraphDefOptions validate_shape(boolean validate_shape);

    // TODO(ashankar): Enable handling of GraphDefs produced by newer binaries
    // with ops that are not defined in the binary calling ImportGraphDef.
    // Similar to the producer_op_list argument to import_graph_def in the
    // python API.
}

// Optional results that may be returned by ImportGraphDef.
@Namespace("tensorflow") public static class ImportGraphDefResults extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public ImportGraphDefResults() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public ImportGraphDefResults(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ImportGraphDefResults(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public ImportGraphDefResults position(long position) {
        return (ImportGraphDefResults)super.position(position);
    }

    // The requested tensors associated with
    // ImportGraphDefOptions::return_tensors. Note that the index may differ
    // from the requested index if the returned tensor has been remapped
    // according to `input_map`.
    public native @StdVector NodeIndexPair return_tensors(); public native ImportGraphDefResults return_tensors(NodeIndexPair return_tensors);

    // The requested nodes associated with ImportGraphDefOptions::return_nodes.
    public native @ByRef NodeVector return_nodes(); public native ImportGraphDefResults return_nodes(NodeVector return_nodes);

    // Keys in ImportGraphDefOptions::input_map that don't appear in `gdef` and
    // weren't used as an input to any node in `gdef`. These keys are likely due
    // to typos, and callers may wish to treat their existence as an error.
    public native @StdVector SafeTensorId missing_unused_input_map_keys(); public native ImportGraphDefResults missing_unused_input_map_keys(SafeTensorId missing_unused_input_map_keys);
}
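// Example (not part of the generated bindings): a hypothetical sketch of an
// import using ImportGraphDef(), declared just below; the prefix "animals" is
// only illustrative, and `gdef` and `g` are assumed to come from earlier steps.
//
//   ImportGraphDefOptions opts = new ImportGraphDefOptions();
//   opts.prefix(new BytePointer("animals"));  // imported nodes become "animals/..."
//   opts.uniquify_names(true);                // rename on conflict instead of failing
//   ImportGraphDefResults results = new ImportGraphDefResults();
//   Status s = ImportGraphDef(opts, gdef, g, null, results);  // refiner may be null
//   if (!s.ok()) { /* g is left unmodified on error */ }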
// Adds the graph in GraphDef `gdef` into an existing Graph `*g`.
//
// On error, returns non-OK and leaves `*g` unmodified.
//
// `refiner` can be null. It should be non-null if the caller
// intends to add additional nodes to the graph after the import. This
// allows the caller to validate shapes of those nodes (since
// ShapeRefiner::AddNode must be called in topological order).
//
// `results` must be non-null if `opts.return_tensors` or `opts.return_nodes`
// is non-empty. It can also be set to fetch the unused input map keys. If it's
// non-null, all the vector fields must be empty.
//
// TODO(ashankar): Push this mechanism and get rid of Session::Extend()
// as a means of enhancing an existing Graph.
@Namespace("tensorflow") public static native @ByVal Status ImportGraphDef(@Const @ByRef ImportGraphDefOptions opts,
                      @Const @ByRef GraphDef gdef, Graph g,
                      ShapeRefiner refiner,
                      ImportGraphDefResults results/*=nullptr*/);
@Namespace("tensorflow") public static native @ByVal Status ImportGraphDef(@Const @ByRef ImportGraphDefOptions opts,
                      @Const @ByRef GraphDef gdef, Graph g,
                      ShapeRefiner refiner);

// Make a copy of "src" into "*dest".
//
// REQUIRES: "*dest" is a freshly allocated graph without any nodes or edges
// other than the implicit Source/Sink nodes.
@Namespace("tensorflow") public static native void CopyGraph(@Const @ByRef Graph src, Graph dest);

// namespace tensorflow

// #endif  // TENSORFLOW_CORE_GRAPH_GRAPH_CONSTRUCTOR_H_


// Parsed from tensorflow/core/graph/gradients.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_GRAPH_GRADIENTS_H_
// #define TENSORFLOW_CORE_GRAPH_GRADIENTS_H_

// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"

// Represents the output of 'node' at 'index'.
@Namespace("tensorflow") public static class NodeOut extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public NodeOut() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public NodeOut(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public NodeOut(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public NodeOut position(long position) {
        return (NodeOut)super.position(position);
    }

    public native Node node(); public native NodeOut node(Node node);
    public native int index(); public native NodeOut index(int index);

    // Returns the string name that represents the output of this node.
    public native @StdString BytePointer name();

    // Returns the data type of the output of this node.
    public native @Cast("tensorflow::DataType") int dtype();
}
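// Example (not part of the generated bindings): JavaCPP maps the C++ struct
// fields of NodeOut to paired getter/setter methods, so a NodeOut is filled in
// like this (assuming some previously created Node n):
//
//   NodeOut y = new NodeOut();
//   y.node(n).index(0);           // output 0 of node n; setters chain
//   BytePointer name = y.name();  // string name of this node output
//   int dtype = y.dtype();        // its DataType enum value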
// NOTE: This API is a work in progress and will likely be changing frequently.
//
// Given initial gradient-node outputs 'y_grad_node_outputs' (which compute the
// symbolic partial derivatives of some loss function 'L' w.r.t the node outputs
// 'y_node_outputs'), adds gradient nodes to 'graph' that compute the symbolic
// partial derivatives of 'L' w.r.t the node outputs 'x_node_outputs'.
//
// REQUIRES: Each node in 'x_node_outputs' must be unique, and so have a single
// output (this restriction will be removed in a subsequent change).
// TODO(andydavis) Add symbolic gradient support for general graphs (the current
// implementation only supports gradients for functions). In particular,
// the nodes in 'x_nodes' are currently restricted to have one output.
@Namespace("tensorflow") public static native @ByVal Status AddSymbolicGradients(@ArraySlice NodeOut y_node_outputs,
                            @ArraySlice NodeOut x_node_outputs,
                            @ArraySlice NodeOut y_grad_node_outputs,
                            @StdVector NodeOut x_grad_node_outputs,
                            Graph graph);

// namespace tensorflow

// #endif  // TENSORFLOW_CORE_GRAPH_GRADIENTS_H_


// Parsed from tensorflow/cc/framework/scope.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CC_FRAMEWORK_SCOPE_H_
// #define TENSORFLOW_CC_FRAMEWORK_SCOPE_H_

// #include
// #include
// #include
// #include
// #include

// #include "tensorflow/cc/framework/ops.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"

/** \addtogroup core
 *  \{

 * A {@code Scope} object represents a set of related TensorFlow ops that have the
 * same properties, such as a common name prefix.
 *
 * A Scope object is a container for TensorFlow Op properties. Op constructors
 * get a Scope object as a mandatory first argument and the constructed op
 * acquires the properties in the object.
 *
 * A simple example:
 *
 *     using namespace ops;
 *     Scope root = Scope::NewRootScope();
 *     auto c1 = Const(root, { {1, 1} });
 *     auto m = MatMul(root, c1, { {41}, {1} });
 *     GraphDef gdef;
 *     Status s = root.ToGraphDef(&gdef);
 *     if (!s.ok()) { ... }
 *
 * Scope hierarchy:
 *
 * The Scope class provides various With<> functions that create a new scope.
 * The new scope typically has one property changed while other properties are
 * inherited from the parent scope.
 * The NewSubScope(name) method appends {@code name} to the prefix of names for ops
 * created within the scope, and WithOpName() changes the suffix which
 * otherwise defaults to the type of the op.
 *
 * Name examples:
 *
 *     Scope root = Scope::NewRootScope();
 *     Scope linear = root.NewSubScope("linear");
 *     // W will be named "linear/W"
 *     auto W = Variable(linear.WithOpName("W"),
 *                       {2, 2}, DT_FLOAT);
 *     // b will be named "linear/b"
 *     auto b = Variable(linear.WithOpName("b"),
 *                       {2}, DT_FLOAT);
 *     auto x = Const(linear, {...});   // name: "linear/Const"
 *     auto m = MatMul(linear, x, W);   // name: "linear/MatMul"
 *     auto r = BiasAdd(linear, m, b);  // name: "linear/BiasAdd"
 *
 * Scope lifetime:
 *
 * A new scope is created by calling Scope::NewRootScope. This creates some
 * resources that are shared by all the child scopes that inherit from this
 * scope, directly or transitively. For instance, a new scope creates a new
 * Graph object to which operations are added when the new scope or its
 * children are used by an Op constructor. The new scope also has a Status
 * object which will be used to indicate errors by Op-constructor functions
 * called on any child scope. The Op-constructor functions have to check the
 * scope's status by calling the ok() method before proceeding to construct the
 * op.
 *
 * Thread safety:
 *
 * A {@code Scope} object is NOT thread-safe. Threads cannot concurrently call
 * op-constructor functions on the same {@code Scope} object. */
@Namespace("tensorflow") @NoOffset public static class Scope extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Scope(Pointer p) { super(p); }

    public Scope(@Const @ByRef Scope other) { super((Pointer)null); allocate(other); }
    private native void allocate(@Const @ByRef Scope other);
    public native @ByRef @Name("operator =") Scope put(@Const @ByRef Scope other);

    // The following functions are for users making graphs. They return brand new
    // scopes, or scopes derived from an existing scope object.

    /** Return a new scope.
     *  This creates a new graph and all operations constructed in this graph
     *  should use the returned object as the "root" scope. */
    public static native @ByVal Scope NewRootScope();

    /** Return a new scope. Ops created with this scope will have
     *  {@code name/child_scope_name} as the prefix. The actual name will be unique
     *  in the current scope. All other properties are inherited from the current
     *  scope. If {@code child_scope_name} is empty, the {@code /} is elided. */
    public native @ByVal Scope NewSubScope(@StdString BytePointer child_scope_name);
    public native @ByVal Scope NewSubScope(@StdString String child_scope_name);
    /** Return a new scope. All ops created within the returned scope will have
     *  names of the form {@code name/op_name[_suffix]}. */
    public native @ByVal Scope WithOpName(@StdString BytePointer op_name);
    public native @ByVal Scope WithOpName(@StdString String op_name);

    /** Return a new scope. All ops created within the returned scope will have as
     *  control dependencies the union of operations in the control_deps vector
     *  and the control dependencies of the current scope. */
    public native @ByVal Scope WithControlDependencies(@ArraySlice Operation control_deps);

    /** Same as above, but convenient to add a control dependency on the operation
     *  producing the control_dep output. */
    public native @ByVal Scope WithControlDependencies(@Const @ByRef Output control_dep);

    /** Return a new scope. All ops created within the returned scope will have no
     *  control dependencies on other operations. */
    public native @ByVal Scope WithNoControlDependencies();

    /** Return a new scope. All ops created within the returned scope will have
     *  the device field set to 'device'. */
    public native @ByVal Scope WithDevice(@StdString BytePointer device);
    public native @ByVal Scope WithDevice(@StdString String device);

    /** Returns a new scope. All ops created within the returned scope will have
     *  their assigned device set to {@code assigned_device}. */
    public native @ByVal Scope WithAssignedDevice(@StdString BytePointer assigned_device);
    public native @ByVal Scope WithAssignedDevice(@StdString String assigned_device);

    /** Return a new scope. All ops created within the returned scope will be
     *  co-located on the device where op is placed.
     *  NOTE: This function is intended to be used by internal libraries only for
     *  controlling placement of ops onto devices. Public use is not encouraged
     *  because the implementation of device placement is subject to change. */
    public native @ByVal Scope ColocateWith(@Const @ByRef Operation op);

    /** Convenience function for above. */
    public native @ByVal Scope ColocateWith(@Const @ByRef Output out);

    /** Clear all colocation constraints. */
    public native @ByVal Scope ClearColocation();

    /** Return a new scope. The op-constructor functions taking the returned scope
     *  as the scope argument will exit as soon as an error is detected, instead
     *  of setting the status on the scope. */
    public native @ByVal Scope ExitOnError();

    /** Return a new scope. All ops created with the new scope will have
     *  kernel_label as the value for their '_kernel' attribute. */
    public native @ByVal Scope WithKernelLabel(@StdString BytePointer kernel_label);
    public native @ByVal Scope WithKernelLabel(@StdString String kernel_label);

    // The following functions are for scope object consumers.

    /** Return a unique name, using default_name if an op name has not been
     *  specified. */
    public native @StdString BytePointer GetUniqueNameForOp(@StdString BytePointer default_name);
    public native @StdString String GetUniqueNameForOp(@StdString String default_name);

    /** Update the status on this scope.
     *  Note: The status object is shared between all children of this scope.
     *  If the resulting status is not Status::OK() and exit_on_error_ is set on
     *  this scope, this function exits by calling LOG(FATAL). */
    public native void UpdateStatus(@Const @ByVal Status s);
    // START_SKIP_DOXYGEN

    /** Update the builder with properties accumulated in this scope. Does not set
     *  status(). */
    // TODO(skyewm): NodeBuilder is not part of public API
    public native void UpdateBuilder(NodeBuilder builder);
    // END_SKIP_DOXYGEN

    public native @Cast("bool") boolean ok();

    // TODO(skyewm): Graph is not part of public API
    public native Graph graph();

    // TODO(skyewm): Graph is not part of public API
    public native @SharedPtr Graph graph_as_shared_ptr();

    public native @ByVal Status status();

    /** If status() is Status::OK(), convert the Graph object stored in this scope
     *  to a GraphDef proto and return Status::OK(). Otherwise, return the error
     *  status as is without performing GraphDef conversion. */
    public native @ByVal Status ToGraphDef(GraphDef gdef);

    // START_SKIP_DOXYGEN

    /** If status() is Status::OK(), construct a Graph object using the default
     *  GraphConstructorOptions, and return Status::OK if graph construction was
     *  successful. Otherwise, return the error status. */
    // TODO(josh11b, keveman): Make this faster; right now it converts
    // Graph->GraphDef->Graph. This cleans up the graph (e.g. adds
    // edges from the source and to the sink node, resolves back edges
    // by name), and makes sure the resulting graph is valid.
    public native @ByVal Status ToGraph(Graph g);

    // Calls AddNode() using this scope's ShapeRefiner. This exists in the public
    // API to prevent custom op wrappers from needing access to shape_refiner.h or
    // scope_internal.h.
    // TODO(skyewm): remove this from public API
    public native @ByVal Status DoShapeInference(Node node);

    // Creates a new root scope that causes all DoShapeInference() calls to return
    // Status::OK() (on the returned scope and any subscopes). Used for testing.
    // TODO(skyewm): fix tests that still require this and eventually remove, or
    // at least remove from public API
    public static native @ByVal Scope DisabledShapeInferenceScope();
    // END_SKIP_DOXYGEN

    public native @StdVector Operation control_deps();

    // START_SKIP_DOXYGEN
    @Opaque public static class Impl extends Pointer {
        /** Empty constructor. Calls {@code super((Pointer)null)}. */
        public Impl() { super((Pointer)null); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Impl(Pointer p) { super(p); }
    }
    public native Impl impl();
}

/** A helper struct to hold the scopes that would be used by a function
 *  constructing a composite op. */

/** \} */

// namespace tensorflow

// #endif  // TENSORFLOW_CC_FRAMEWORK_SCOPE_H_
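// Example (not part of the generated bindings): a minimal Java sketch of the
// Scope workflow documented above, using only methods declared on this class.
//
//   Scope root = Scope.NewRootScope();
//   Scope linear = root.NewSubScope("linear");  // ops created here: "linear/..."
//   Scope weights = linear.WithOpName("W");     // next op would be named "linear/W"
//   // ... construct ops with an op-wrapper API, checking linear.ok() ...
//   GraphDef gdef = new GraphDef();
//   Status s = root.ToGraphDef(gdef);           // error status if any op failed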

// Parsed from tensorflow/cc/framework/ops.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CC_FRAMEWORK_OPS_H_
// #define TENSORFLOW_CC_FRAMEWORK_OPS_H_

// #include
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/framework/tensor.pb.h"
// #include "tensorflow/core/graph/graph.h"
// #include "tensorflow/core/lib/hash/hash.h"
// #include "tensorflow/core/lib/strings/strcat.h"

/** \defgroup core Core Tensorflow API */

/** \addtogroup core
 *  \{
 *
 * Represents a node in the computation graph. */
@Namespace("tensorflow") @NoOffset public static class Operation extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Operation(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public Operation(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public Operation position(long position) {
        return (Operation)super.position(position);
    }

    public Operation() { super((Pointer)null); allocate(); }
    private native void allocate();
    public Operation(Node n) { super((Pointer)null); allocate(n); }
    private native void allocate(Node n);

    public native int num_inputs();
    public native @Cast("tensorflow::DataType") int input_type(int o);
    public native @ByVal Output input(int i);

    public native int num_outputs();
    public native @Cast("tensorflow::DataType") int output_type(int o);
    public native @ByVal Output output(int i);

    public native Node node();

    public native @Cast("tensorflow::uint64") long hash(int index);

    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Operation other);
}

/** Represents a tensor value produced by an Operation. */
@Namespace("tensorflow") @NoOffset public static class Output extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Output(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public Output(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public Output position(long position) {
        return (Output)super.position(position);
    }

    public Output() { super((Pointer)null); allocate(); }
    private native void allocate();
    public Output(Node n) { super((Pointer)null); allocate(n); }
    private native void allocate(Node n);
    public Output(Node n, int index) { super((Pointer)null); allocate(n, index); }
    private native void allocate(Node n, int index);
    public Output(@Const @ByRef Operation op, int index) { super((Pointer)null); allocate(op, index); }
    private native void allocate(@Const @ByRef Operation op, int index);

    public native @ByVal Operation op();
    public native Node node();
    public native int index();
    public native @Cast("tensorflow::DataType") int type();
    public native @StdString BytePointer name();
    public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Output other);

    public native @Cast("tensorflow::uint64") long hash();
}

/** Hash class that can be used for e.g. storing Outputs in an unordered_map */
@Namespace("tensorflow") public static class OutputHash extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public OutputHash() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public OutputHash(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public OutputHash(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public OutputHash position(long position) {
        return (OutputHash)super.position(position);
    }

    public native @Cast("std::size_t") @Name("operator ()") long apply(@Const @ByRef Output output);
}
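// Example (not part of the generated bindings): wrapping an existing Node
// (for instance one returned by the graph builders above) and inspecting its
// outputs. All calls below are methods declared on Operation and Output.
//
//   Operation op = new Operation(n);  // n is a previously created Node
//   int outs = op.num_outputs();
//   Output first = op.output(0);
//   BytePointer name = first.name();  // string name of this output
//   int dtype = first.type();         // DataType enum value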
/** Represents a tensor value that can be used as an operand to an Operation. */
@Namespace("tensorflow") @NoOffset public static class Input extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Input(Pointer p) { super(p); }

    /** Initializer enables constructing an Input object from various kinds of C++
     *  constants such as simple primitive constants and nested initializer lists
     *  representing a multi-dimensional array. Initializer constructors are all
     *  templates, so the aforementioned kinds of C++ constants can be used to
     *  construct an Initializer. Initializer stores the value it got constructed
     *  with in a Tensor object. */
    @NoOffset public static class Initializer extends Pointer {
        static { Loader.load(); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Initializer(Pointer p) { super(p); }

        /** Construct from a scalar value of an arithmetic type or a type that can
         *  be converted to a string (e.g. a string literal). */
        public Initializer(@Const @ByRef Tensor t) { super((Pointer)null); allocate(t); }
        private native void allocate(@Const @ByRef Tensor t);  // NOLINT(runtime/explicit)

        /** Construct from a scalar value and an explicit shape. */

        /** Construct from an initializer list of scalars (a one-dimensional tensor). */

        /** Construct from an initializer list of scalars and an explicit shape. */

        /** Construct a multi-dimensional tensor from a nested initializer
         *  list. Note that C++ syntax allows nesting of arbitrarily typed
         *  initializer lists, so such invalid initializers cannot be disallowed at
         *  compile time. This function performs checks to make sure that the nested
         *  initializer list is indeed a valid multi-dimensional tensor. */

        // START_SKIP_DOXYGEN
        // END_SKIP_DOXYGEN

        public native @ByVal TensorProto AsTensorProto();
        public native @ByRef Status status(); public native Initializer status(Status status);
        public native @ByRef Tensor tensor(); public native Initializer tensor(Tensor tensor);
    }

    /** All of Input's constructors are implicit. Input can be implicitly
     *  constructed from the following objects:
     *  * Output: This is so that the output of an Operation can be directly used
     *    as the input to an op wrapper, which takes Inputs.
     *  * A scalar, or a multi-dimensional tensor specified as a recursive
     *    initializer list. This enables directly passing constants as
     *    inputs to op wrappers.
     *  * A Tensor object. */
    public Input(@Const @ByRef Output o) { super((Pointer)null); allocate(o); }
    private native void allocate(@Const @ByRef Output o);  // NOLINT(runtime/explicit)
    public Input(@Const @ByRef Input.Initializer init) { super((Pointer)null); allocate(init); }
    private native void allocate(@Const @ByRef Input.Initializer init);
    public Input(@ByRef Tensor init) { super((Pointer)null); allocate(init); }
    private native void allocate(@ByRef Tensor init);
    public Input(byte init) { super((Pointer)null); allocate(init); }
    private native void allocate(byte init);
    public Input(short init) { super((Pointer)null); allocate(init); }
    private native void allocate(short init);
    public Input(int init) { super((Pointer)null); allocate(init); }
    private native void allocate(int init);
    public Input(long init) { super((Pointer)null); allocate(init); }
    private native void allocate(long init);
    public Input(float init) { super((Pointer)null); allocate(init); }
    private native void allocate(float init);
    public Input(double init) { super((Pointer)null); allocate(init); }
    private native void allocate(double init);
    public Input(boolean init) { super((Pointer)null); allocate(init); }
    private native void allocate(boolean init);
    public Input(@StdString String init) { super((Pointer)null); allocate(init); }
    private native void allocate(@StdString String init);
    public Input(@StdString BytePointer init) { super((Pointer)null); allocate(init); }
    private native void allocate(@StdString BytePointer init);

    /** Constructor specifying a node name, index and datatype. This should only
     *  be used for specifying a backward edge, needed by control flow. */
    public Input(@StdString BytePointer name, int i, @Cast("tensorflow::DataType") int dt) { super((Pointer)null); allocate(name, i, dt); }
    private native void allocate(@StdString BytePointer name, int i, @Cast("tensorflow::DataType") int dt);
    public Input(@StdString String name, int i, @Cast("tensorflow::DataType") int dt) { super((Pointer)null); allocate(name, i, dt); }
    private native void allocate(@StdString String name, int i, @Cast("tensorflow::DataType") int dt);

    public native Node node();
    public native @StdString BytePointer node_name();
    public native int index();
    public native @Cast("tensorflow::DataType") int data_type();
    public native @ByVal Status status();
    public native @Const @ByRef Tensor tensor();
}

/** A type for representing the output of ops that produce more than one output,
 *  or a list of tensors. */

/** A type for representing the input to ops that require a list of tensors. */
@Namespace("tensorflow") @NoOffset public static class InputList extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public InputList(Pointer p) { super(p); }

    /** Implicitly convert a list of outputs to a list of inputs. This is useful
     *  to write code such as ops::Concat(ops::Split(x, 4)). */
    public InputList(@Cast("const tensorflow::OutputList*") @ByRef OutputVector out) { super((Pointer)null); allocate(out); }
    private native void allocate(@Cast("const tensorflow::OutputList*") @ByRef OutputVector out);

    public InputList(@ArraySlice Input inputs) { super((Pointer)null); allocate(inputs); }
    private native void allocate(@ArraySlice Input inputs);
}

/** \} */

// namespace tensorflow

// #endif  // TENSORFLOW_CC_FRAMEWORK_OPS_H_
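// Example (not part of the generated bindings): Input's implicit C++
// constructors are exposed as overloaded Java constructors, so scalars,
// strings, Tensors, and Outputs can all become op operands directly. `first`
// is the Output from the Operation example above.
//
//   Input a = new Input(7);        // int scalar constant
//   Input b = new Input(3.5f);     // float scalar constant
//   Input c = new Input("label");  // string scalar constant
//   Input d = new Input(first);    // from an Output produced earlier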
// Parsed from tensorflow/core/framework/op_gen_lib.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CORE_FRAMEWORK_OP_GEN_LIB_H_
// #define TENSORFLOW_CORE_FRAMEWORK_OP_GEN_LIB_H_

// #include
// #include
// #include "tensorflow/core/framework/api_def.pb.h"
// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/env.h"

// Forward declare protos so their symbols can be removed from .so exports

@Namespace("tensorflow") public static native @StdString BytePointer Spaces(int n);

// Wrap prefix + str to be at most width characters, indenting every line
// after the first by prefix.size() spaces. Intended use case is something
// like prefix = "  Foo(" and str is a list of arguments (terminated by a ")").
// TODO(josh11b): Option to wrap on ", " instead of " " when possible.
@Namespace("tensorflow") public static native @StdString BytePointer WordWrap(@StringPiece BytePointer prefix, @StringPiece BytePointer str, int width);
@Namespace("tensorflow") public static native @StdString String WordWrap(@StringPiece String prefix, @StringPiece String str, int width);

// Looks for an "=" at the beginning of *description. If found, strips it off
// (and any following spaces) from *description and returns true. Otherwise
// returns false.
@Namespace("tensorflow") public static native @Cast("bool") boolean ConsumeEquals(@StringPiece @Cast({"char*", "StringPiece*"}) BytePointer description);

// Convert text-serialized protobufs to/from multiline format.
@Namespace("tensorflow") public static native @StdString BytePointer PBTxtToMultiline(@StringPiece BytePointer pbtxt, @Const @ByRef StringVector multi_line_fields);
@Namespace("tensorflow") public static native @StdString String PBTxtToMultiline(@StringPiece String pbtxt, @Const @ByRef StringVector multi_line_fields);
@Namespace("tensorflow") public static native @StdString BytePointer PBTxtFromMultiline(@StringPiece BytePointer multiline_pbtxt);
@Namespace("tensorflow") public static native @StdString String PBTxtFromMultiline(@StringPiece String multiline_pbtxt);

// Takes a list of files with ApiDefs text protos, and allows you to
// look up the specific ApiDef for any given op.
@Namespace("tensorflow") @NoOffset public static class ApiDefMap extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ApiDefMap(Pointer p) { super(p); }

    // OpList must be a superset of ops of any subsequently loaded
    // ApiDef.
    public ApiDefMap(@Const @ByRef OpList op_list) { super((Pointer)null); allocate(op_list); }
    private native void allocate(@Const @ByRef OpList op_list);
    // You can call this method multiple times to load multiple
    // sets of files. Api definitions are merged if the same
    // op definition is loaded multiple times. Later-loaded
    // definitions take precedence.
    // ApiDefs loaded from files must contain a subset of ops defined
    // in the OpList passed to the constructor.
    public native @ByVal Status LoadFileList(Env env, @Const @ByRef StringVector filenames);

    // Load a single file. Api definitions are merged if the same
    // op definition is loaded multiple times. Later-loaded
    // definitions take precedence.
    // ApiDefs loaded from file must contain a subset of ops defined
    // in the OpList passed to the constructor.
    public native @ByVal Status LoadFile(Env env, @StdString BytePointer filename);
    public native @ByVal Status LoadFile(Env env, @StdString String filename);

    // Load ApiDefs from a string containing ApiDefs text proto.
    // api_def_file_contents is expected to be in "multiline format".
    // ApiDefs must contain a subset of ops defined in the OpList
    // passed to the constructor.
    public native @ByVal Status LoadApiDef(@StdString BytePointer api_def_file_contents);
    public native @ByVal Status LoadApiDef(@StdString String api_def_file_contents);

    // Updates ApiDef docs. For example, if ApiDef renames an argument
    // or attribute, applies these renames to descriptions as well.
    // UpdateDocs should only be called once after all ApiDefs are loaded
    // since it replaces original op names.
    public native void UpdateDocs();

    // Look up ApiDef proto based on the given graph op name.
    // If graph op name is not in this ApiDefMap, returns nullptr.
    //
    // Note: Returned ApiDef pointer should stay valid even after calling
    // Load* functions defined above. Subsequent calls to Load* might modify
    // returned ApiDef contents, but should never remove the ApiDef itself.
    public native @Const ApiDef GetApiDef(@StdString BytePointer name);
    public native @Const ApiDef GetApiDef(@StdString String name);
}

// namespace tensorflow

// #endif  // TENSORFLOW_CORE_FRAMEWORK_OP_GEN_LIB_H_


// Parsed from tensorflow/cc/framework/cc_op_gen.h

/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_
// #define TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_

// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/framework/op_gen_lib.h"
// #include "tensorflow/core/platform/types.h"

/** Result is written to files dot_h and dot_cc. */
@Namespace("tensorflow") public static native void WriteCCOps(@Const @ByRef OpList ops, @Const @ByRef ApiDefMap api_def_map,
                @StdString BytePointer dot_h_fname, @StdString BytePointer dot_cc_fname);
@Namespace("tensorflow") public static native void WriteCCOps(@Const @ByRef OpList ops, @Const @ByRef ApiDefMap api_def_map,
                @StdString String dot_h_fname, @StdString String dot_cc_fname);

// namespace tensorflow

// #endif  // TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_
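// Example (not part of the generated bindings): a hypothetical sketch of
// loading ApiDef overrides and looking one up. The file name "api_def.pbtxt"
// and the op name "Identity" are only illustrative, and Env.Default() is
// assumed to be available from these presets.
//
//   ApiDefMap map = new ApiDefMap(op_list);  // op_list: an OpList superset
//   Status s = map.LoadFile(Env.Default(), "api_def.pbtxt");
//   if (s.ok()) {
//       ApiDef def = map.GetApiDef("Identity");  // null if the op is unknown
//   }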
// Parsed from tensorflow/cc/framework/gradients.h

/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// #ifndef TENSORFLOW_CC_FRAMEWORK_GRADIENTS_H_
// #define TENSORFLOW_CC_FRAMEWORK_GRADIENTS_H_

// #include "tensorflow/cc/framework/ops.h"
// #include "tensorflow/cc/framework/scope.h"

/** NOTE: This API is a work in progress and will likely be changing frequently.
 *
 *  Given initial gradients 'grad_inputs' (which represent the symbolic partial
 *  derivatives of some loss function 'L' w.r.t 'outputs'), adds gradient nodes
 *  to the graph associated with 'scope', which compute (and return in
 *  'grad_outputs') the symbolic partial derivatives of 'L' w.r.t 'inputs'. */
@Namespace("tensorflow") public static native @ByVal Status AddSymbolicGradients(@Const @ByRef Scope scope,
                            @Const @ByRef OutputVector outputs,
                            @Const @ByRef OutputVector inputs,
                            @Const @ByRef OutputVector grad_inputs,
                            OutputVector grad_outputs);

// Same as above, but uses 'OnesLike' for all shapes in
// 'outputs' as grad_inputs.
@Namespace("tensorflow") public static native @ByVal Status AddSymbolicGradients(@Const @ByRef Scope scope,
                            @Const @ByRef OutputVector outputs,
                            @Const @ByRef OutputVector inputs,
                            OutputVector grad_outputs);

/** Returns a sentinel Output that represents 'no gradient' (i.e. no gradient
 *  flows along some graph edge during backpropagation).
 *  Can be returned in 'grad_outputs' by an invocation of 'AddSymbolicGradients'
 *  (note that gradient flow through an Output can be stopped through the use of
 *  the StopGradient node). */
@Namespace("tensorflow") public static native @ByVal Output NoGradient();

// namespace tensorflow

// #endif  // TENSORFLOW_CC_FRAMEWORK_GRADIENTS_H_
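// Example (not part of the generated bindings): a minimal sketch of the
// gradient API above, assuming `scope` plus Outputs `y` and `x` built with an
// op-wrapper API, and the usual JavaCPP std::vector helpers on OutputVector.
//
//   OutputVector grads = new OutputVector();
//   // The overload without grad_inputs uses OnesLike(y) as the initial gradient.
//   Status s = AddSymbolicGradients(scope, new OutputVector(y),
//                                   new OutputVector(x), grads);
//   if (s.ok()) { Output dydx = grads.get(0); }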
// Parsed from tensorflow/core/protobuf/saver.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/saver.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fsaver_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fsaver_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include
// #include
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fprotobuf_2fsaver_2eproto

// Internal implementation detail -- do not use these members.
// namespace protobuf_tensorflow_2fcore_2fprotobuf_2fsaver_2eproto
// namespace tensorflow
// namespace protobuf
// namespace google

/** enum tensorflow::SaverDef_CheckpointFormatVersion */
public static final int
    SaverDef_CheckpointFormatVersion_LEGACY = 0,
    SaverDef_CheckpointFormatVersion_V1 = 1,
    SaverDef_CheckpointFormatVersion_V2 = 2,
    SaverDef_CheckpointFormatVersion_SaverDef_CheckpointFormatVersion_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
    SaverDef_CheckpointFormatVersion_SaverDef_CheckpointFormatVersion_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::SaverDef_CheckpointFormatVersion") int SaverDef_CheckpointFormatVersion_CheckpointFormatVersion_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::SaverDef_CheckpointFormatVersion") int SaverDef_CheckpointFormatVersion_CheckpointFormatVersion_MAX();
@Namespace("tensorflow") @MemberGetter public static native int SaverDef_CheckpointFormatVersion_CheckpointFormatVersion_ARRAYSIZE();

@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer SaverDef_CheckpointFormatVersion_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer SaverDef_CheckpointFormatVersion_Name(@Cast("tensorflow::SaverDef_CheckpointFormatVersion") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_Parse(
    @StdString BytePointer name, @Cast("tensorflow::SaverDef_CheckpointFormatVersion*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_Parse(
    @StdString String name, @Cast("tensorflow::SaverDef_CheckpointFormatVersion*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_Parse(
    @StdString BytePointer name, @Cast("tensorflow::SaverDef_CheckpointFormatVersion*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_Parse(
    @StdString String name, @Cast("tensorflow::SaverDef_CheckpointFormatVersion*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_Parse(
    @StdString BytePointer name, @Cast("tensorflow::SaverDef_CheckpointFormatVersion*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean SaverDef_CheckpointFormatVersion_Parse(
    @StdString String name, @Cast("tensorflow::SaverDef_CheckpointFormatVersion*") int... value);
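/** Editor's addition, not generated code: round-trips a checkpoint format
 *  version through the generated enum helpers declared just above. */
public static void exampleCheckpointFormatVersionNames() {
    // Value -> name: V2 maps to "V2".
    String name = SaverDef_CheckpointFormatVersion_Name(SaverDef_CheckpointFormatVersion_V2).getString();
    // Name -> value: Parse returns false for names that are not in the enum.
    int[] value = new int[1];
    if (!SaverDef_CheckpointFormatVersion_Parse(name, value)
            || value[0] != SaverDef_CheckpointFormatVersion_V2) {
        throw new AssertionError("unexpected enum round-trip result");
    }
}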
// ===================================================================

@Namespace("tensorflow") @NoOffset public static class SaverDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SaverDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public SaverDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public SaverDef position(long position) {
        return (SaverDef)super.position(position);
    }

    public SaverDef() { super((Pointer)null); allocate(); }
    private native void allocate();

    public SaverDef(@Const @ByRef SaverDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef SaverDef from);

    public native @ByRef @Name("operator =") SaverDef put(@Const @ByRef SaverDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef SaverDef default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const SaverDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(SaverDef other);
    public native void Swap(SaverDef other);

    // implements Message ----------------------------------------------

    public native SaverDef New();
    public native SaverDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef SaverDef from);
    public native void MergeFrom(@Const @ByRef SaverDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    @MemberGetter public static native @Cast("const tensorflow::SaverDef::CheckpointFormatVersion") int LEGACY();
    public static final int LEGACY = LEGACY();
    @MemberGetter public static native @Cast("const tensorflow::SaverDef::CheckpointFormatVersion") int V1();
    public static final int V1 = V1();
    @MemberGetter public static native @Cast("const tensorflow::SaverDef::CheckpointFormatVersion") int V2();
    public static final int V2 = V2();
    public static native @Cast("bool") boolean CheckpointFormatVersion_IsValid(int value);
    @MemberGetter public static native @Cast("const tensorflow::SaverDef::CheckpointFormatVersion") int CheckpointFormatVersion_MIN();
    public static final int CheckpointFormatVersion_MIN = CheckpointFormatVersion_MIN();
    @MemberGetter public static native @Cast("const tensorflow::SaverDef::CheckpointFormatVersion") int CheckpointFormatVersion_MAX();
    public static final int CheckpointFormatVersion_MAX = CheckpointFormatVersion_MAX();
    @MemberGetter public static native int CheckpointFormatVersion_ARRAYSIZE();
    public static final int CheckpointFormatVersion_ARRAYSIZE = CheckpointFormatVersion_ARRAYSIZE();
    public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer CheckpointFormatVersion_descriptor();
    public static native @StdString BytePointer CheckpointFormatVersion_Name(@Cast("tensorflow::SaverDef::CheckpointFormatVersion") int value);
    public static native @Cast("bool") boolean CheckpointFormatVersion_Parse(@StdString BytePointer name, @Cast("tensorflow::SaverDef::CheckpointFormatVersion*") IntPointer value);
    public static native @Cast("bool") boolean CheckpointFormatVersion_Parse(@StdString String name, @Cast("tensorflow::SaverDef::CheckpointFormatVersion*") IntBuffer value);
    public static native @Cast("bool") boolean CheckpointFormatVersion_Parse(@StdString BytePointer name, @Cast("tensorflow::SaverDef::CheckpointFormatVersion*") int... value);
    public static native @Cast("bool") boolean CheckpointFormatVersion_Parse(@StdString String name, @Cast("tensorflow::SaverDef::CheckpointFormatVersion*") IntPointer value);
    public static native @Cast("bool") boolean CheckpointFormatVersion_Parse(@StdString BytePointer name, @Cast("tensorflow::SaverDef::CheckpointFormatVersion*") IntBuffer value);
    public static native @Cast("bool") boolean CheckpointFormatVersion_Parse(@StdString String name, @Cast("tensorflow::SaverDef::CheckpointFormatVersion*") int... value);

    // accessors -------------------------------------------------------

    // string filename_tensor_name = 1;
    public native void clear_filename_tensor_name();
    @MemberGetter public static native int kFilenameTensorNameFieldNumber();
    public static final int kFilenameTensorNameFieldNumber = kFilenameTensorNameFieldNumber();
    public native @StdString BytePointer filename_tensor_name();
    public native void set_filename_tensor_name(@StdString BytePointer value);
    public native void set_filename_tensor_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_filename_tensor_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_filename_tensor_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_filename_tensor_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_filename_tensor_name();
    public native void set_allocated_filename_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer filename_tensor_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_filename_tensor_name();
    public native @Deprecated void unsafe_arena_set_allocated_filename_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer filename_tensor_name);

    // string save_tensor_name = 2;
    public native void clear_save_tensor_name();
    @MemberGetter public static native int kSaveTensorNameFieldNumber();
    public static final int kSaveTensorNameFieldNumber = kSaveTensorNameFieldNumber();
    public native @StdString BytePointer save_tensor_name();
    public native void set_save_tensor_name(@StdString BytePointer value);
    public native void set_save_tensor_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_save_tensor_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_save_tensor_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_save_tensor_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_save_tensor_name();
    public native void set_allocated_save_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer save_tensor_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_save_tensor_name();
    public native @Deprecated void unsafe_arena_set_allocated_save_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer save_tensor_name);

    // string restore_op_name = 3;
    public native void clear_restore_op_name();
    @MemberGetter public static native int kRestoreOpNameFieldNumber();
    public static final int kRestoreOpNameFieldNumber = kRestoreOpNameFieldNumber();
    public native @StdString BytePointer restore_op_name();
    public native void set_restore_op_name(@StdString BytePointer value);
    public native void set_restore_op_name(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_restore_op_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_restore_op_name(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_restore_op_name();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_restore_op_name();
    public native void set_allocated_restore_op_name(@StdString @Cast({"char*", "std::string*"}) BytePointer restore_op_name);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_restore_op_name();
    public native @Deprecated void unsafe_arena_set_allocated_restore_op_name(@StdString @Cast({"char*", "std::string*"}) BytePointer restore_op_name);

    // int32 max_to_keep = 4;
    public native void clear_max_to_keep();
    @MemberGetter public static native int kMaxToKeepFieldNumber();
    public static final int kMaxToKeepFieldNumber = kMaxToKeepFieldNumber();
    public native @Cast("google::protobuf::int32") int max_to_keep();
    public native void set_max_to_keep(@Cast("google::protobuf::int32") int value);

    // bool sharded = 5;
    public native void clear_sharded();
    @MemberGetter public static native int kShardedFieldNumber();
    public static final int kShardedFieldNumber = kShardedFieldNumber();
    public native @Cast("bool") boolean sharded();
    public native void set_sharded(@Cast("bool") boolean value);

    // float keep_checkpoint_every_n_hours = 6;
    public native void clear_keep_checkpoint_every_n_hours();
    @MemberGetter public static native int kKeepCheckpointEveryNHoursFieldNumber();
    public static final int kKeepCheckpointEveryNHoursFieldNumber = kKeepCheckpointEveryNHoursFieldNumber();
    public native float keep_checkpoint_every_n_hours();
    public native void set_keep_checkpoint_every_n_hours(float value);

    // .tensorflow.SaverDef.CheckpointFormatVersion version = 7;
    public native void clear_version();
    @MemberGetter public static native int kVersionFieldNumber();
    public static final int kVersionFieldNumber = kVersionFieldNumber();
    public native @Cast("tensorflow::SaverDef_CheckpointFormatVersion") int version();
    public native void set_version(@Cast("tensorflow::SaverDef_CheckpointFormatVersion") int value);
}
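/** Editor's addition, not generated code: fills in a SaverDef via the
 *  generated accessors above. The tensor/op names follow common tf.train.Saver
 *  conventions but are illustrative placeholders. */
public static SaverDef exampleSaverDef() {
    SaverDef def = new SaverDef();
    def.set_filename_tensor_name("save/Const:0");
    def.set_save_tensor_name("save/control_dependency:0");
    def.set_restore_op_name("save/restore_all");
    def.set_max_to_keep(5);                          // retain at most 5 checkpoints
    def.set_sharded(true);                           // write one shard per device
    def.set_keep_checkpoint_every_n_hours(10000.0f);
    def.set_version(SaverDef_CheckpointFormatVersion_V2);
    return def;
}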
// ===================================================================

// ===================================================================

// #ifdef __GNUC__
// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored "-Wstrict-aliasing"
// #endif  // __GNUC__
// SaverDef

// string filename_tensor_name = 1;
// #if LANG_CXX11
// #endif

// string save_tensor_name = 2;
// #if LANG_CXX11
// #endif

// string restore_op_name = 3;
// #if LANG_CXX11
// #endif

// int32 max_to_keep = 4;

// bool sharded = 5;

// float keep_checkpoint_every_n_hours = 6;

// .tensorflow.SaverDef.CheckpointFormatVersion version = 7;

// #ifdef __GNUC__
// #pragma GCC diagnostic pop
// #endif  // __GNUC__

// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// namespace protobuf
// namespace google

// @@protoc_insertion_point(global_scope)

// #endif  // PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fsaver_2eproto

// Parsed from tensorflow/core/protobuf/meta_graph.pb.h

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/meta_graph.proto

// #ifndef PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fmeta_5fgraph_2eproto
// #define PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fmeta_5fgraph_2eproto

// #include
// #include

// #if GOOGLE_PROTOBUF_VERSION < 3006000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please update
// #error your headers.
// #endif
// #if 3006000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers.  Please
// #error regenerate this file with a newer version of protoc.
// #endif

// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include  // IWYU pragma: export
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/graph.pb.h"
// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/framework/tensor_shape.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/protobuf/saver.pb.h"
// @@protoc_insertion_point(includes)
// #define PROTOBUF_INTERNAL_EXPORT_protobuf_tensorflow_2fcore_2fprotobuf_2fmeta_5fgraph_2eproto

// Internal implementation detail -- do not use these members.
// namespace protobuf_tensorflow_2fcore_2fprotobuf_2fmeta_5fgraph_2eproto

@Namespace("tensorflow") @Opaque public static class MetaGraphDef_CollectionDefEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public MetaGraphDef_CollectionDefEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MetaGraphDef_CollectionDefEntry_DoNotUse(Pointer p) { super(p); }
}

@Namespace("tensorflow") @Opaque public static class MetaGraphDef_SignatureDefEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public MetaGraphDef_SignatureDefEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MetaGraphDef_SignatureDefEntry_DoNotUse(Pointer p) { super(p); }
}

@Namespace("tensorflow") @Opaque public static class SignatureDef_InputsEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public SignatureDef_InputsEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SignatureDef_InputsEntry_DoNotUse(Pointer p) { super(p); }
}
@Namespace("tensorflow") @Opaque public static class SignatureDef_OutputsEntry_DoNotUse extends Pointer {
    /** Empty constructor. Calls {@code super((Pointer)null)}. */
    public SignatureDef_OutputsEntry_DoNotUse() { super((Pointer)null); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SignatureDef_OutputsEntry_DoNotUse(Pointer p) { super(p); }
}
// namespace tensorflow
// namespace protobuf
// namespace google

// ===================================================================

@Namespace("tensorflow") @NoOffset public static class MetaGraphDef_MetaInfoDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MetaGraphDef_MetaInfoDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public MetaGraphDef_MetaInfoDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public MetaGraphDef_MetaInfoDef position(long position) {
        return (MetaGraphDef_MetaInfoDef)super.position(position);
    }

    public MetaGraphDef_MetaInfoDef() { super((Pointer)null); allocate(); }
    private native void allocate();

    public MetaGraphDef_MetaInfoDef(@Const @ByRef MetaGraphDef_MetaInfoDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef MetaGraphDef_MetaInfoDef from);

    public native @ByRef @Name("operator =") MetaGraphDef_MetaInfoDef put(@Const @ByRef MetaGraphDef_MetaInfoDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef MetaGraphDef_MetaInfoDef default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const MetaGraphDef_MetaInfoDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(MetaGraphDef_MetaInfoDef other);
    public native void Swap(MetaGraphDef_MetaInfoDef other);

    // implements Message ----------------------------------------------

    public native MetaGraphDef_MetaInfoDef New();
    public native MetaGraphDef_MetaInfoDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef MetaGraphDef_MetaInfoDef from);
    public native void MergeFrom(@Const @ByRef MetaGraphDef_MetaInfoDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated string tags = 4;
    public native int tags_size();
    public native void clear_tags();
    @MemberGetter public static native int kTagsFieldNumber();
    public static final int kTagsFieldNumber = kTagsFieldNumber();
    public native @StdString BytePointer tags(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_tags(int index);
    public native void set_tags(int index, @StdString BytePointer value);
    public native void set_tags(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_tags(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_tags(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_tags();
    public native void add_tags(@StdString BytePointer value);
    public native void add_tags(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_tags(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_tags(String value, @Cast("size_t") long size);

    // string meta_graph_version = 1;
    public native void clear_meta_graph_version();
    @MemberGetter public static native int kMetaGraphVersionFieldNumber();
    public static final int kMetaGraphVersionFieldNumber = kMetaGraphVersionFieldNumber();
    public native @StdString BytePointer meta_graph_version();
    public native void set_meta_graph_version(@StdString BytePointer value);
    public native void set_meta_graph_version(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_meta_graph_version(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_meta_graph_version(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_meta_graph_version();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_meta_graph_version();
    public native void set_allocated_meta_graph_version(@StdString @Cast({"char*", "std::string*"}) BytePointer meta_graph_version);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_meta_graph_version();
    public native @Deprecated void unsafe_arena_set_allocated_meta_graph_version(@StdString @Cast({"char*", "std::string*"}) BytePointer meta_graph_version);

    // string tensorflow_version = 5;
    public native void clear_tensorflow_version();
    @MemberGetter public static native int kTensorflowVersionFieldNumber();
    public static final int kTensorflowVersionFieldNumber = kTensorflowVersionFieldNumber();
    public native @StdString BytePointer tensorflow_version();
    public native void set_tensorflow_version(@StdString BytePointer value);
    public native void set_tensorflow_version(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_tensorflow_version(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_tensorflow_version(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_tensorflow_version();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_tensorflow_version();
    public native void set_allocated_tensorflow_version(@StdString @Cast({"char*", "std::string*"}) BytePointer tensorflow_version);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_tensorflow_version();
    public native @Deprecated void unsafe_arena_set_allocated_tensorflow_version(@StdString @Cast({"char*", "std::string*"}) BytePointer tensorflow_version);

    // string tensorflow_git_version = 6;
    public native void clear_tensorflow_git_version();
    @MemberGetter public static native int kTensorflowGitVersionFieldNumber();
    public static final int kTensorflowGitVersionFieldNumber = kTensorflowGitVersionFieldNumber();
    public native @StdString BytePointer tensorflow_git_version();
    public native void set_tensorflow_git_version(@StdString BytePointer value);
    public native void set_tensorflow_git_version(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_tensorflow_git_version(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_tensorflow_git_version(String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_tensorflow_git_version();
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_tensorflow_git_version();
    public native void set_allocated_tensorflow_git_version(@StdString @Cast({"char*", "std::string*"}) BytePointer tensorflow_git_version);
    public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_tensorflow_git_version();
    public native @Deprecated void unsafe_arena_set_allocated_tensorflow_git_version(@StdString @Cast({"char*", "std::string*"}) BytePointer tensorflow_git_version);

    // .tensorflow.OpList stripped_op_list = 2;
    public native @Cast("bool") boolean has_stripped_op_list();
    public native void clear_stripped_op_list();
    @MemberGetter public static native int kStrippedOpListFieldNumber();
    public static final int kStrippedOpListFieldNumber = kStrippedOpListFieldNumber();
    public native @Const @ByRef OpList stripped_op_list();
    public native OpList release_stripped_op_list();
    public native OpList mutable_stripped_op_list();
    public native void set_allocated_stripped_op_list(OpList stripped_op_list);
    public native void unsafe_arena_set_allocated_stripped_op_list(OpList stripped_op_list);
    public native OpList unsafe_arena_release_stripped_op_list();

    // .google.protobuf.Any any_info = 3;
    public native @Cast("bool") boolean has_any_info();
    public native void clear_any_info();
    @MemberGetter public static native int kAnyInfoFieldNumber();
    public static final int kAnyInfoFieldNumber = kAnyInfoFieldNumber();
    public native @Cast("const google::protobuf::Any*") @ByRef Pointer any_info();
    public native @Cast("google::protobuf::Any*") Pointer release_any_info();
    public native @Cast("google::protobuf::Any*") Pointer mutable_any_info();
    public native void set_allocated_any_info(@Cast("google::protobuf::Any*") Pointer any_info);
    public native void unsafe_arena_set_allocated_any_info(@Cast("google::protobuf::Any*") Pointer any_info);
    public native @Cast("google::protobuf::Any*") Pointer unsafe_arena_release_any_info();

    // bool stripped_default_attrs = 7;
    public native void clear_stripped_default_attrs();
    @MemberGetter public static native int kStrippedDefaultAttrsFieldNumber();
    public static final int kStrippedDefaultAttrsFieldNumber = kStrippedDefaultAttrsFieldNumber();
    public native @Cast("bool") boolean stripped_default_attrs();
    public native void set_stripped_default_attrs(@Cast("bool") boolean value);
}
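/** Editor's addition, not generated code: a minimal sketch of the
 *  MetaGraphDef.MetaInfoDef accessors above; the tag and version values are
 *  illustrative placeholders. */
public static MetaGraphDef_MetaInfoDef exampleMetaInfoDef() {
    MetaGraphDef_MetaInfoDef info = new MetaGraphDef_MetaInfoDef();
    info.add_tags("serve");               // repeated string tags = 4
    info.add_tags("gpu");
    info.set_meta_graph_version("1");     // string meta_graph_version = 1
    info.set_stripped_default_attrs(true);
    if (info.tags_size() != 2 || !"serve".equals(info.tags(0).getString())) {
        throw new AssertionError("unexpected tags contents");
    }
    return info;
}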
// -------------------------------------------------------------------

// -------------------------------------------------------------------

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class MetaGraphDef extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MetaGraphDef(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public MetaGraphDef(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public MetaGraphDef position(long position) {
        return (MetaGraphDef)super.position(position);
    }

    public MetaGraphDef() { super((Pointer)null); allocate(); }
    private native void allocate();

    public MetaGraphDef(@Const @ByRef MetaGraphDef from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef MetaGraphDef from);

    public native @ByRef @Name("operator =") MetaGraphDef put(@Const @ByRef MetaGraphDef from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef MetaGraphDef default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const MetaGraphDef internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(MetaGraphDef other);
    public native void Swap(MetaGraphDef other);

    // implements Message ----------------------------------------------

    public native MetaGraphDef New();
    public native MetaGraphDef New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef MetaGraphDef from);
    public native void MergeFrom(@Const @ByRef MetaGraphDef from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // map<string, .tensorflow.CollectionDef> collection_def = 4;
    public native int collection_def_size();
    public native void clear_collection_def();
    @MemberGetter public static native int kCollectionDefFieldNumber();
    public static final int kCollectionDefFieldNumber = kCollectionDefFieldNumber();
    public native @Const @ByRef StringCollectionDefMap collection_def();
    public native StringCollectionDefMap mutable_collection_def();

    // map<string, .tensorflow.SignatureDef> signature_def = 5;
    public native int signature_def_size();
    public native void clear_signature_def();
    @MemberGetter public static native int kSignatureDefFieldNumber();
    public static final int kSignatureDefFieldNumber = kSignatureDefFieldNumber();
    public native @Const @ByRef StringSignatureDefMap signature_def();
    public native StringSignatureDefMap mutable_signature_def();

    // repeated .tensorflow.AssetFileDef asset_file_def = 6;
    public native int asset_file_def_size();
    public native void clear_asset_file_def();
    @MemberGetter public static native int kAssetFileDefFieldNumber();
    public static final int kAssetFileDefFieldNumber = kAssetFileDefFieldNumber();
    public native AssetFileDef mutable_asset_file_def(int index);
    public native @Const @ByRef AssetFileDef asset_file_def(int index);
    public native AssetFileDef add_asset_file_def();

    // .tensorflow.MetaGraphDef.MetaInfoDef meta_info_def = 1;
    public native @Cast("bool") boolean has_meta_info_def();
    public native void clear_meta_info_def();
    @MemberGetter public static native int kMetaInfoDefFieldNumber();
    public static final int kMetaInfoDefFieldNumber = kMetaInfoDefFieldNumber();
    public native @Const @ByRef MetaGraphDef_MetaInfoDef meta_info_def();
    public native MetaGraphDef_MetaInfoDef release_meta_info_def();
    public native MetaGraphDef_MetaInfoDef mutable_meta_info_def();
    public native void set_allocated_meta_info_def(MetaGraphDef_MetaInfoDef meta_info_def);
    public native void unsafe_arena_set_allocated_meta_info_def(MetaGraphDef_MetaInfoDef meta_info_def);
    public native MetaGraphDef_MetaInfoDef unsafe_arena_release_meta_info_def();

    // .tensorflow.GraphDef graph_def = 2;
    public native @Cast("bool") boolean has_graph_def();
    public native void clear_graph_def();
    @MemberGetter public static native int kGraphDefFieldNumber();
    public static final int kGraphDefFieldNumber = kGraphDefFieldNumber();
    public native @Const @ByRef GraphDef graph_def();
    public native GraphDef release_graph_def();
    public native GraphDef mutable_graph_def();
    public native void set_allocated_graph_def(GraphDef graph_def);
    public native void unsafe_arena_set_allocated_graph_def(GraphDef graph_def);
    public native GraphDef unsafe_arena_release_graph_def();

    // .tensorflow.SaverDef saver_def = 3;
    public native @Cast("bool") boolean has_saver_def();
    public native void clear_saver_def();
    @MemberGetter public static native int kSaverDefFieldNumber();
    public static final int kSaverDefFieldNumber = kSaverDefFieldNumber();
    public native @Const @ByRef SaverDef saver_def();
    public native SaverDef release_saver_def();
    public native SaverDef mutable_saver_def();
    public native void set_allocated_saver_def(SaverDef saver_def);
    public native void unsafe_arena_set_allocated_saver_def(SaverDef saver_def);
    public native SaverDef unsafe_arena_release_saver_def();
}
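/** Editor's addition, not generated code: assembles a MetaGraphDef from its
 *  sub-messages using the accessors above. Assumes GraphDef and SaverDef expose
 *  the usual generated CopyFrom overloads declared elsewhere in this file. */
public static MetaGraphDef exampleMetaGraphDef(GraphDef graph, SaverDef saver) {
    MetaGraphDef meta = new MetaGraphDef();
    meta.mutable_meta_info_def().add_tags("serve"); // mutable_* creates the sub-message
    meta.mutable_graph_def().CopyFrom(graph);
    meta.mutable_saver_def().CopyFrom(saver);
    if (!meta.has_graph_def() || !meta.has_saver_def()) throw new AssertionError();
    return meta;
}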
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class CollectionDef_NodeList extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CollectionDef_NodeList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public CollectionDef_NodeList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public CollectionDef_NodeList position(long position) {
        return (CollectionDef_NodeList)super.position(position);
    }

    public CollectionDef_NodeList() { super((Pointer)null); allocate(); }
    private native void allocate();

    public CollectionDef_NodeList(@Const @ByRef CollectionDef_NodeList from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef CollectionDef_NodeList from);

    public native @ByRef @Name("operator =") CollectionDef_NodeList put(@Const @ByRef CollectionDef_NodeList from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef CollectionDef_NodeList default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const CollectionDef_NodeList internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(CollectionDef_NodeList other);
    public native void Swap(CollectionDef_NodeList other);

    // implements Message ----------------------------------------------

    public native CollectionDef_NodeList New();
    public native CollectionDef_NodeList New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef CollectionDef_NodeList from);
    public native void MergeFrom(@Const @ByRef CollectionDef_NodeList from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated string value = 1;
    public native int value_size();
    public native void clear_value();
    @MemberGetter public static native int kValueFieldNumber();
    public static final int kValueFieldNumber = kValueFieldNumber();
    public native @StdString BytePointer value(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_value(int index);
    public native void set_value(int index, @StdString BytePointer value);
    public native void set_value(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_value(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void set_value(int index, String value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_value();
    public native void add_value(@StdString BytePointer value);
    public native void add_value(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_value(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
    public native void add_value(String value, @Cast("size_t") long size);
}
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class CollectionDef_BytesList extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CollectionDef_BytesList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public CollectionDef_BytesList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public CollectionDef_BytesList position(long position) {
        return (CollectionDef_BytesList)super.position(position);
    }

    public CollectionDef_BytesList() { super((Pointer)null); allocate(); }
    private native void allocate();

    public CollectionDef_BytesList(@Const @ByRef CollectionDef_BytesList from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef CollectionDef_BytesList from);

    public native @ByRef @Name("operator =") CollectionDef_BytesList put(@Const @ByRef CollectionDef_BytesList from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef CollectionDef_BytesList default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const CollectionDef_BytesList internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(CollectionDef_BytesList other);
    public native void Swap(CollectionDef_BytesList other);

    // implements Message ----------------------------------------------

    public native CollectionDef_BytesList New();
    public native CollectionDef_BytesList New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef CollectionDef_BytesList from);
    public native void MergeFrom(@Const @ByRef CollectionDef_BytesList from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated bytes value = 1;
    public native int value_size();
    public native void clear_value();
    @MemberGetter public static native int kValueFieldNumber();
    public static final int kValueFieldNumber = kValueFieldNumber();
    public native @StdString BytePointer value(int index);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_value(int index);
    public native void set_value(int index, @StdString BytePointer value);
    public native void set_value(int index, @StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void set_value(int index, @Const Pointer value, @Cast("size_t") long size);
    public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_value();
    public native void add_value(@StdString BytePointer value);
    public native void add_value(@StdString String value);
    // #if LANG_CXX11
    // #endif
    public native void add_value(@Const Pointer value, @Cast("size_t") long size);
}

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class CollectionDef_Int64List extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CollectionDef_Int64List(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public CollectionDef_Int64List(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public CollectionDef_Int64List position(long position) {
        return (CollectionDef_Int64List)super.position(position);
    }

    public CollectionDef_Int64List() { super((Pointer)null); allocate(); }
    private native void allocate();

    public CollectionDef_Int64List(@Const @ByRef CollectionDef_Int64List from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef CollectionDef_Int64List from);

    public native @ByRef @Name("operator =") CollectionDef_Int64List put(@Const @ByRef CollectionDef_Int64List from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef CollectionDef_Int64List default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const CollectionDef_Int64List internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(CollectionDef_Int64List other);
    public native void Swap(CollectionDef_Int64List other);

    // implements Message ----------------------------------------------

    public native CollectionDef_Int64List New();
    public native CollectionDef_Int64List New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef CollectionDef_Int64List from);
    public native void MergeFrom(@Const @ByRef CollectionDef_Int64List from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated int64 value = 1 [packed = true];
    public native int value_size();
    public native void clear_value();
    @MemberGetter public static native int kValueFieldNumber();
    public static final int kValueFieldNumber = kValueFieldNumber();
    public native @Cast("google::protobuf::int64") long value(int index);
    public native void set_value(int index, @Cast("google::protobuf::int64") long value);
    public native void add_value(@Cast("google::protobuf::int64") long value);
}
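/** Editor's addition, not generated code: the packed repeated int64 accessors
 *  of CollectionDef.Int64List shown above. */
public static long exampleInt64ListSum() {
    CollectionDef_Int64List ints = new CollectionDef_Int64List();
    for (long v : new long[] {1L, 2L, 3L}) ints.add_value(v);
    long sum = 0;
    for (int i = 0; i < ints.value_size(); i++) sum += ints.value(i);
    return sum; // 1 + 2 + 3 == 6
}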
// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class CollectionDef_FloatList extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CollectionDef_FloatList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public CollectionDef_FloatList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public CollectionDef_FloatList position(long position) {
        return (CollectionDef_FloatList)super.position(position);
    }

    public CollectionDef_FloatList() { super((Pointer)null); allocate(); }
    private native void allocate();

    public CollectionDef_FloatList(@Const @ByRef CollectionDef_FloatList from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef CollectionDef_FloatList from);

    public native @ByRef @Name("operator =") CollectionDef_FloatList put(@Const @ByRef CollectionDef_FloatList from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef CollectionDef_FloatList default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const CollectionDef_FloatList internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(CollectionDef_FloatList other);
    public native void Swap(CollectionDef_FloatList other);

    // implements Message ----------------------------------------------

    public native CollectionDef_FloatList New();
    public native CollectionDef_FloatList New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef CollectionDef_FloatList from);
    public native void MergeFrom(@Const @ByRef CollectionDef_FloatList from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated float value = 1 [packed = true];
    public native int value_size();
    public native void clear_value();
    @MemberGetter public static native int kValueFieldNumber();
    public static final int kValueFieldNumber = kValueFieldNumber();
    public native float value(int index);
    public native void set_value(int index, float value);
    public native void add_value(float value);
}

// -------------------------------------------------------------------

@Namespace("tensorflow") @NoOffset public static class CollectionDef_AnyList extends MessageLite {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CollectionDef_AnyList(Pointer p) { super(p); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public CollectionDef_AnyList(long size) { super((Pointer)null); allocateArray(size); }
    private native void allocateArray(long size);
    @Override public CollectionDef_AnyList position(long position) {
        return (CollectionDef_AnyList)super.position(position);
    }

    public CollectionDef_AnyList() { super((Pointer)null); allocate(); }
    private native void allocate();

    public CollectionDef_AnyList(@Const @ByRef CollectionDef_AnyList from) { super((Pointer)null); allocate(from); }
    private native void allocate(@Const @ByRef CollectionDef_AnyList from);

    public native @ByRef @Name("operator =") CollectionDef_AnyList put(@Const @ByRef CollectionDef_AnyList from);
    // #if LANG_CXX11
    // #endif

    public native Arena GetArena();
    public native Pointer GetMaybeArenaPointer();
    public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
    public static native @Const @ByRef CollectionDef_AnyList default_instance();

    public static native void InitAsDefaultInstance();  // FOR INTERNAL USE ONLY
    public static native @Const CollectionDef_AnyList internal_default_instance();
    @MemberGetter public static native int kIndexInFileMessages();
    public static final int kIndexInFileMessages = kIndexInFileMessages();

    public native void UnsafeArenaSwap(CollectionDef_AnyList other);
    public native void Swap(CollectionDef_AnyList other);

    // implements Message ----------------------------------------------

    public native CollectionDef_AnyList New();
    public native CollectionDef_AnyList New(Arena arena);
    public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from);
    public native void CopyFrom(@Const @ByRef CollectionDef_AnyList from);
    public native void MergeFrom(@Const @ByRef CollectionDef_AnyList from);
    public native void Clear();
    public native @Cast("bool") boolean IsInitialized();

    public native @Cast("size_t") long ByteSizeLong();
    public native @Cast("bool") boolean MergePartialFromCodedStream(CodedInputStream input);
    public native void SerializeWithCachedSizes(CodedOutputStream output);
    public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target);
    public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target);
    public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray(@Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target);
    public native int GetCachedSize();

    public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();

    // nested types ----------------------------------------------------

    // accessors -------------------------------------------------------

    // repeated .google.protobuf.Any value = 1;
    public native int value_size();
    public native void clear_value();
    @MemberGetter public static native int kValueFieldNumber();
    public static final int kValueFieldNumber = kValueFieldNumber();
    public native @Cast("google::protobuf::Any*") Pointer mutable_value(int index);
    public native @Cast("const google::protobuf::Any*") @ByRef Pointer value(int index);
    public native @Cast("google::protobuf::Any*") Pointer add_value();
}
void clear_node_list(); @MemberGetter public static native int kNodeListFieldNumber(); public static final int kNodeListFieldNumber = kNodeListFieldNumber(); public native @Const @ByRef CollectionDef_NodeList node_list(); public native CollectionDef_NodeList release_node_list(); public native CollectionDef_NodeList mutable_node_list(); public native void set_allocated_node_list(CollectionDef_NodeList node_list); public native void unsafe_arena_set_allocated_node_list( CollectionDef_NodeList node_list); public native CollectionDef_NodeList unsafe_arena_release_node_list(); // .tensorflow.CollectionDef.BytesList bytes_list = 2; public native @Cast("bool") boolean has_bytes_list(); public native void clear_bytes_list(); @MemberGetter public static native int kBytesListFieldNumber(); public static final int kBytesListFieldNumber = kBytesListFieldNumber(); public native @Const @ByRef CollectionDef_BytesList bytes_list(); public native CollectionDef_BytesList release_bytes_list(); public native CollectionDef_BytesList mutable_bytes_list(); public native void set_allocated_bytes_list(CollectionDef_BytesList bytes_list); public native void unsafe_arena_set_allocated_bytes_list( CollectionDef_BytesList bytes_list); public native CollectionDef_BytesList unsafe_arena_release_bytes_list(); // .tensorflow.CollectionDef.Int64List int64_list = 3; public native @Cast("bool") boolean has_int64_list(); public native void clear_int64_list(); @MemberGetter public static native int kInt64ListFieldNumber(); public static final int kInt64ListFieldNumber = kInt64ListFieldNumber(); public native @Const @ByRef CollectionDef_Int64List int64_list(); public native CollectionDef_Int64List release_int64_list(); public native CollectionDef_Int64List mutable_int64_list(); public native void set_allocated_int64_list(CollectionDef_Int64List int64_list); public native void unsafe_arena_set_allocated_int64_list( CollectionDef_Int64List int64_list); public native CollectionDef_Int64List unsafe_arena_release_int64_list(); // .tensorflow.CollectionDef.FloatList float_list = 4; public native @Cast("bool") boolean has_float_list(); public native void clear_float_list(); @MemberGetter public static native int kFloatListFieldNumber(); public static final int kFloatListFieldNumber = kFloatListFieldNumber(); public native @Const @ByRef CollectionDef_FloatList float_list(); public native CollectionDef_FloatList release_float_list(); public native CollectionDef_FloatList mutable_float_list(); public native void set_allocated_float_list(CollectionDef_FloatList float_list); public native void unsafe_arena_set_allocated_float_list( CollectionDef_FloatList float_list); public native CollectionDef_FloatList unsafe_arena_release_float_list(); // .tensorflow.CollectionDef.AnyList any_list = 5; public native @Cast("bool") boolean has_any_list(); public native void clear_any_list(); @MemberGetter public static native int kAnyListFieldNumber(); public static final int kAnyListFieldNumber = kAnyListFieldNumber(); public native @Const @ByRef CollectionDef_AnyList any_list(); public native CollectionDef_AnyList release_any_list(); public native CollectionDef_AnyList mutable_any_list(); public native void set_allocated_any_list(CollectionDef_AnyList any_list); public native void unsafe_arena_set_allocated_any_list( CollectionDef_AnyList any_list); public native CollectionDef_AnyList unsafe_arena_release_any_list(); public native void clear_kind(); public native @Cast("tensorflow::CollectionDef::KindCase") int kind_case(); } // 
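// --- Editor's illustrative sketch (not part of the generated bindings) -------
// A CollectionDef stores exactly one of the five list kinds; kind_case() and
// the KindCase constants above tell a caller which accessor is valid. The
// `def` argument is assumed to come from a deserialized MetaGraphDef.
public static String collectionKindName(CollectionDef def) {
    switch (def.kind_case()) {
        case CollectionDef.kNodeList:  return "node_list";
        case CollectionDef.kBytesList: return "bytes_list";
        case CollectionDef.kInt64List: return "int64_list";
        case CollectionDef.kFloatList: return "float_list";
        case CollectionDef.kAnyList:   return "any_list";
        default:                       return "KIND_NOT_SET";
    }
}
// ------------------------------------------------------------------------------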
------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class TensorInfo_CooSparse extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorInfo_CooSparse(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorInfo_CooSparse(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorInfo_CooSparse position(long position) { return (TensorInfo_CooSparse)super.position(position); } public TensorInfo_CooSparse() { super((Pointer)null); allocate(); } private native void allocate(); public TensorInfo_CooSparse(@Const @ByRef TensorInfo_CooSparse from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorInfo_CooSparse from); public native @ByRef @Name("operator =") TensorInfo_CooSparse put(@Const @ByRef TensorInfo_CooSparse from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorInfo_CooSparse default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorInfo_CooSparse internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorInfo_CooSparse other); public native void Swap(TensorInfo_CooSparse other); // implements Message ---------------------------------------------- public native TensorInfo_CooSparse New(); public native TensorInfo_CooSparse New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorInfo_CooSparse from); public native void MergeFrom(@Const @ByRef TensorInfo_CooSparse from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string values_tensor_name = 1; public native void clear_values_tensor_name(); @MemberGetter public static native int kValuesTensorNameFieldNumber(); public static final int kValuesTensorNameFieldNumber = 
kValuesTensorNameFieldNumber(); public native @StdString BytePointer values_tensor_name(); public native void set_values_tensor_name(@StdString BytePointer value); public native void set_values_tensor_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_values_tensor_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_values_tensor_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_values_tensor_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_values_tensor_name(); public native void set_allocated_values_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer values_tensor_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_values_tensor_name(); public native @Deprecated void unsafe_arena_set_allocated_values_tensor_name( @StdString @Cast({"char*", "std::string*"}) BytePointer values_tensor_name); // string indices_tensor_name = 2; public native void clear_indices_tensor_name(); @MemberGetter public static native int kIndicesTensorNameFieldNumber(); public static final int kIndicesTensorNameFieldNumber = kIndicesTensorNameFieldNumber(); public native @StdString BytePointer indices_tensor_name(); public native void set_indices_tensor_name(@StdString BytePointer value); public native void set_indices_tensor_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_indices_tensor_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_indices_tensor_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_indices_tensor_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_indices_tensor_name(); public native void set_allocated_indices_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer indices_tensor_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_indices_tensor_name(); public native @Deprecated void unsafe_arena_set_allocated_indices_tensor_name( @StdString @Cast({"char*", "std::string*"}) BytePointer indices_tensor_name); // string dense_shape_tensor_name = 3; public native void clear_dense_shape_tensor_name(); @MemberGetter public static native int kDenseShapeTensorNameFieldNumber(); public static final int kDenseShapeTensorNameFieldNumber = kDenseShapeTensorNameFieldNumber(); public native @StdString BytePointer dense_shape_tensor_name(); public native void set_dense_shape_tensor_name(@StdString BytePointer value); public native void set_dense_shape_tensor_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_dense_shape_tensor_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_dense_shape_tensor_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_dense_shape_tensor_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_dense_shape_tensor_name(); public native void set_allocated_dense_shape_tensor_name(@StdString @Cast({"char*", "std::string*"}) BytePointer dense_shape_tensor_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_dense_shape_tensor_name(); public native 
@Deprecated void unsafe_arena_set_allocated_dense_shape_tensor_name( @StdString @Cast({"char*", "std::string*"}) BytePointer dense_shape_tensor_name); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class TensorInfo extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorInfo(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public TensorInfo(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public TensorInfo position(long position) { return (TensorInfo)super.position(position); } public TensorInfo() { super((Pointer)null); allocate(); } private native void allocate(); public TensorInfo(@Const @ByRef TensorInfo from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef TensorInfo from); public native @ByRef @Name("operator =") TensorInfo put(@Const @ByRef TensorInfo from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef TensorInfo default_instance(); /** enum tensorflow::TensorInfo::EncodingCase */ public static final int kName = 1, kCooSparse = 4, ENCODING_NOT_SET = 0; public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const TensorInfo internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(TensorInfo other); public native void Swap(TensorInfo other); // implements Message ---------------------------------------------- public native TensorInfo New(); public native TensorInfo New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef TensorInfo from); public native void MergeFrom(@Const @ByRef TensorInfo from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // .tensorflow.TensorShapeProto tensor_shape = 3; public native @Cast("bool") boolean has_tensor_shape(); public native void 
clear_tensor_shape(); @MemberGetter public static native int kTensorShapeFieldNumber(); public static final int kTensorShapeFieldNumber = kTensorShapeFieldNumber(); public native @Const @ByRef TensorShapeProto tensor_shape(); public native TensorShapeProto release_tensor_shape(); public native TensorShapeProto mutable_tensor_shape(); public native void set_allocated_tensor_shape(TensorShapeProto tensor_shape); public native void unsafe_arena_set_allocated_tensor_shape( TensorShapeProto tensor_shape); public native TensorShapeProto unsafe_arena_release_tensor_shape(); // .tensorflow.DataType dtype = 2; public native void clear_dtype(); @MemberGetter public static native int kDtypeFieldNumber(); public static final int kDtypeFieldNumber = kDtypeFieldNumber(); public native @Cast("tensorflow::DataType") int dtype(); public native void set_dtype(@Cast("tensorflow::DataType") int value); public native void clear_name(); @MemberGetter public static native int kNameFieldNumber(); public static final int kNameFieldNumber = kNameFieldNumber(); public native @StdString BytePointer name(); public native void set_name(@StdString BytePointer value); public native void set_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name(); public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_name(); public native @Deprecated void unsafe_arena_set_allocated_name( @StdString @Cast({"char*", "std::string*"}) BytePointer name); // .tensorflow.TensorInfo.CooSparse coo_sparse = 4; public native @Cast("bool") boolean has_coo_sparse(); public native void clear_coo_sparse(); @MemberGetter public static native int kCooSparseFieldNumber(); public static final int kCooSparseFieldNumber = kCooSparseFieldNumber(); public native @Const @ByRef TensorInfo_CooSparse coo_sparse(); public native TensorInfo_CooSparse release_coo_sparse(); public native TensorInfo_CooSparse mutable_coo_sparse(); public native void set_allocated_coo_sparse(TensorInfo_CooSparse coo_sparse); public native void unsafe_arena_set_allocated_coo_sparse( TensorInfo_CooSparse coo_sparse); public native TensorInfo_CooSparse unsafe_arena_release_coo_sparse(); public native void clear_encoding(); public native @Cast("tensorflow::TensorInfo::EncodingCase") int encoding_case(); } // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class SignatureDef extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SignatureDef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public SignatureDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public SignatureDef position(long position) { return (SignatureDef)super.position(position); } public SignatureDef() { super((Pointer)null); allocate(); } private native void allocate(); public SignatureDef(@Const @ByRef SignatureDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef SignatureDef from); public native @ByRef @Name("operator =") SignatureDef put(@Const @ByRef SignatureDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef SignatureDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const SignatureDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(SignatureDef other); public native void Swap(SignatureDef other); // implements Message ---------------------------------------------- public native SignatureDef New(); public native SignatureDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef SignatureDef from); public native void MergeFrom(@Const @ByRef SignatureDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // map<string, .tensorflow.TensorInfo> inputs = 1; public native int inputs_size(); public native void clear_inputs(); @MemberGetter public static native int kInputsFieldNumber(); public static final int kInputsFieldNumber = kInputsFieldNumber(); public native @Const @ByRef StringTensorInfoMap inputs(); public native StringTensorInfoMap mutable_inputs(); // map<string, .tensorflow.TensorInfo> outputs = 2; public native int outputs_size(); public native void clear_outputs(); @MemberGetter public static native int kOutputsFieldNumber(); public static final int kOutputsFieldNumber = kOutputsFieldNumber(); public native @Const @ByRef StringTensorInfoMap outputs(); public native StringTensorInfoMap mutable_outputs(); // string method_name = 3; public native void clear_method_name(); @MemberGetter public static
native int kMethodNameFieldNumber(); public static final int kMethodNameFieldNumber = kMethodNameFieldNumber(); public native @StdString BytePointer method_name(); public native void set_method_name(@StdString BytePointer value); public native void set_method_name(@StdString String value); // #if LANG_CXX11 // #endif public native void set_method_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_method_name(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_method_name(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_method_name(); public native void set_allocated_method_name(@StdString @Cast({"char*", "std::string*"}) BytePointer method_name); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_method_name(); public native @Deprecated void unsafe_arena_set_allocated_method_name( @StdString @Cast({"char*", "std::string*"}) BytePointer method_name); } // ------------------------------------------------------------------- @Namespace("tensorflow") @NoOffset public static class AssetFileDef extends MessageLite { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AssetFileDef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public AssetFileDef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public AssetFileDef position(long position) { return (AssetFileDef)super.position(position); } public AssetFileDef() { super((Pointer)null); allocate(); } private native void allocate(); public AssetFileDef(@Const @ByRef AssetFileDef from) { super((Pointer)null); allocate(from); } private native void allocate(@Const @ByRef AssetFileDef from); public native @ByRef @Name("operator =") AssetFileDef put(@Const @ByRef AssetFileDef from); // #if LANG_CXX11 // #endif public native Arena GetArena(); public native Pointer GetMaybeArenaPointer(); public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor(); public static native @Const @ByRef AssetFileDef default_instance(); public static native void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY public static native @Const AssetFileDef internal_default_instance(); @MemberGetter public static native int kIndexInFileMessages(); public static final int kIndexInFileMessages = kIndexInFileMessages(); public native void UnsafeArenaSwap(AssetFileDef other); public native void Swap(AssetFileDef other); // implements Message ---------------------------------------------- public native AssetFileDef New(); public native AssetFileDef New(Arena arena); public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef MessageLite from); public native void CopyFrom(@Const @ByRef AssetFileDef from); public native void MergeFrom(@Const @ByRef AssetFileDef from); public native void Clear(); public native @Cast("bool") boolean IsInitialized(); public native @Cast("size_t") long ByteSizeLong(); public native @Cast("bool") boolean MergePartialFromCodedStream( CodedInputStream input); public native void SerializeWithCachedSizes( CodedOutputStream output); public native @Cast("google::protobuf::uint8*") BytePointer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, 
@Cast("google::protobuf::uint8*") BytePointer target); public native @Cast("google::protobuf::uint8*") ByteBuffer InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") ByteBuffer target); public native @Cast("google::protobuf::uint8*") byte[] InternalSerializeWithCachedSizesToArray( @Cast("bool") boolean deterministic, @Cast("google::protobuf::uint8*") byte[] target); public native int GetCachedSize(); public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata(); // nested types ---------------------------------------------------- // accessors ------------------------------------------------------- // string filename = 2; public native void clear_filename(); @MemberGetter public static native int kFilenameFieldNumber(); public static final int kFilenameFieldNumber = kFilenameFieldNumber(); public native @StdString BytePointer filename(); public native void set_filename(@StdString BytePointer value); public native void set_filename(@StdString String value); // #if LANG_CXX11 // #endif public native void set_filename(@Cast("const char*") BytePointer value, @Cast("size_t") long size); public native void set_filename(String value, @Cast("size_t") long size); public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_filename(); public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_filename(); public native void set_allocated_filename(@StdString @Cast({"char*", "std::string*"}) BytePointer filename); public native @Deprecated @StdString @Cast({"char*", "std::string*"}) BytePointer unsafe_arena_release_filename(); public native @Deprecated void unsafe_arena_set_allocated_filename( @StdString @Cast({"char*", "std::string*"}) BytePointer filename); // .tensorflow.TensorInfo tensor_info = 1; public native @Cast("bool") boolean has_tensor_info(); public native void clear_tensor_info(); @MemberGetter public static native int kTensorInfoFieldNumber(); public static final int kTensorInfoFieldNumber = kTensorInfoFieldNumber(); public native @Const @ByRef TensorInfo tensor_info(); public native TensorInfo release_tensor_info(); public native TensorInfo mutable_tensor_info(); public native void set_allocated_tensor_info(TensorInfo tensor_info); public native void unsafe_arena_set_allocated_tensor_info( TensorInfo tensor_info); public native TensorInfo unsafe_arena_release_tensor_info(); } // =================================================================== // =================================================================== // #ifdef __GNUC__ // #pragma GCC diagnostic push // #pragma GCC diagnostic ignored "-Wstrict-aliasing" // #endif // __GNUC__ // MetaGraphDef_MetaInfoDef // string meta_graph_version = 1; // #if LANG_CXX11 // #endif // .tensorflow.OpList stripped_op_list = 2; // .google.protobuf.Any any_info = 3; // repeated string tags = 4; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // string tensorflow_version = 5; // #if LANG_CXX11 // #endif // string tensorflow_git_version = 6; // #if LANG_CXX11 // #endif // bool stripped_default_attrs = 7; // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // MetaGraphDef // .tensorflow.MetaGraphDef.MetaInfoDef meta_info_def = 1; // .tensorflow.GraphDef graph_def = 2; // .tensorflow.SaverDef saver_def = 3; // map collection_def = 4; // map signature_def = 5; // repeated 
.tensorflow.AssetFileDef asset_file_def = 6; // ------------------------------------------------------------------- // CollectionDef_NodeList // repeated string value = 1; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // CollectionDef_BytesList // repeated bytes value = 1; // #if LANG_CXX11 // #endif // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // CollectionDef_Int64List // repeated int64 value = 1 [packed = true]; // ------------------------------------------------------------------- // CollectionDef_FloatList // repeated float value = 1 [packed = true]; // ------------------------------------------------------------------- // CollectionDef_AnyList // repeated .google.protobuf.Any value = 1; // ------------------------------------------------------------------- // CollectionDef // .tensorflow.CollectionDef.NodeList node_list = 1; // .tensorflow.CollectionDef.BytesList bytes_list = 2; // .tensorflow.CollectionDef.Int64List int64_list = 3; // .tensorflow.CollectionDef.FloatList float_list = 4; // .tensorflow.CollectionDef.AnyList any_list = 5; // ------------------------------------------------------------------- // TensorInfo_CooSparse // string values_tensor_name = 1; // #if LANG_CXX11 // #endif // string indices_tensor_name = 2; // #if LANG_CXX11 // #endif // string dense_shape_tensor_name = 3; // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // TensorInfo // string name = 1; // #if LANG_CXX11 // #endif // .tensorflow.TensorInfo.CooSparse coo_sparse = 4; // .tensorflow.DataType dtype = 2; // .tensorflow.TensorShapeProto tensor_shape = 3; // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // SignatureDef // map inputs = 1; // map outputs = 2; // string method_name = 3; // #if LANG_CXX11 // #endif // ------------------------------------------------------------------- // AssetFileDef // .tensorflow.TensorInfo tensor_info = 1; // string filename = 2; // #if LANG_CXX11 // #endif // #ifdef __GNUC__ // #pragma GCC diagnostic pop // #endif // __GNUC__ // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // ------------------------------------------------------------------- // @@protoc_insertion_point(namespace_scope) // namespace tensorflow // @@protoc_insertion_point(global_scope) // #endif // 
PROTOBUF_INCLUDED_tensorflow_2fcore_2fprotobuf_2fmeta_5fgraph_2eproto // Parsed from tensorflow/cc/saved_model/loader.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ /** SavedModel loading functions and SavedModelBundle struct. */ // #ifndef TENSORFLOW_CC_SAVED_MODEL_LOADER_H_ // #define TENSORFLOW_CC_SAVED_MODEL_LOADER_H_ // #include // #include // #include "tensorflow/core/lib/core/status.h" // #include "tensorflow/core/protobuf/meta_graph.pb.h" // #include "tensorflow/core/public/session.h" /** SavedModel representation once the SavedModel is loaded from storage. */ @Namespace("tensorflow") @NoOffset public static class SavedModelBundle extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SavedModelBundle(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public SavedModelBundle(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @Override public SavedModelBundle position(long position) { return (SavedModelBundle)super.position(position); } public native @MemberGetter @UniquePtr Session session(); public native @ByRef MetaGraphDef meta_graph_def(); public native SavedModelBundle meta_graph_def(MetaGraphDef meta_graph_def); /** A TensorFlow Session does not Close itself on destruction. To avoid * resource leaks, we explicitly call Close on Sessions that we create. */ public SavedModelBundle() { super((Pointer)null); allocate(); } private native void allocate(); } /** Loads a SavedModel from the specified export directory. The meta graph def * to be loaded is identified by the supplied tags, corresponding exactly to * the set of tags used at SavedModel build time. Returns a SavedModel bundle * with a session and the requested meta graph def, if found. */ @Namespace("tensorflow") public static native @ByVal Status LoadSavedModel(@Const @ByRef SessionOptions session_options, @Const @ByRef RunOptions run_options, @StdString BytePointer export_dir, @Const @ByRef StringUnorderedSet tags, SavedModelBundle bundle); @Namespace("tensorflow") public static native @ByVal Status LoadSavedModel(@Const @ByRef SessionOptions session_options, @Const @ByRef RunOptions run_options, @StdString String export_dir, @Const @ByRef StringUnorderedSet tags, SavedModelBundle bundle); /** Checks whether the provided directory could contain a SavedModel. Note that * the method does not load any data by itself. If the method returns {@code false}, * the export directory definitely does not contain a SavedModel. If the method * returns {@code true}, the export directory may contain a SavedModel but provides * no guarantee that it can be loaded. 
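 *
 *  Editor's illustrative sketch (not part of the generated bindings): a
 *  typical check-then-load sequence with the two functions declared here.
 *  The export directory is a placeholder, and the exact insert signature of
 *  the generated StringUnorderedSet wrapper is an assumption.
 *
 *  <pre>{@code
 *  String exportDir = "/path/to/saved_model";  // placeholder path
 *  if (MaybeSavedModelDirectory(exportDir)) {
 *      SavedModelBundle bundle = new SavedModelBundle();
 *      StringUnorderedSet tags = new StringUnorderedSet();
 *      tags.insert(new BytePointer("serve"));  // assumed signature; "serve"
 *                                              // mirrors kSavedModelTagServe
 *      Status s = LoadSavedModel(new SessionOptions(), new RunOptions(),
 *                                exportDir, tags, bundle);
 *      if (!s.ok()) {
 *          throw new RuntimeException(s.error_message().getString());
 *      }
 *      // bundle.session() and bundle.meta_graph_def() are now usable.
 *  }
 *  }</pre>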
*/ @Namespace("tensorflow") public static native @Cast("bool") boolean MaybeSavedModelDirectory(@StdString BytePointer export_dir); @Namespace("tensorflow") public static native @Cast("bool") boolean MaybeSavedModelDirectory(@StdString String export_dir); // namespace tensorflow // #endif // TENSORFLOW_CC_SAVED_MODEL_LOADER_H_ // Parsed from tensorflow/cc/saved_model/tag_constants.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CC_SAVED_MODEL_TAG_CONSTANTS_H_ // #define TENSORFLOW_CC_SAVED_MODEL_TAG_CONSTANTS_H_ /** Tag for the {@code gpu} graph. */ @Namespace("tensorflow") @MemberGetter public static native byte kSavedModelTagGpu(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kSavedModelTagGpu(); /** Tag for the {@code tpu} graph. */ @Namespace("tensorflow") @MemberGetter public static native byte kSavedModelTagTpu(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kSavedModelTagTpu(); /** Tag for the {@code serving} graph. */ @Namespace("tensorflow") @MemberGetter public static native byte kSavedModelTagServe(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kSavedModelTagServe(); /** Tag for the {@code training} graph. */ @Namespace("tensorflow") @MemberGetter public static native byte kSavedModelTagTrain(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kSavedModelTagTrain(); // namespace tensorflow // #endif // TENSORFLOW_CC_SAVED_MODEL_TAG_CONSTANTS_H_ // Parsed from tensorflow/cc/saved_model/signature_constants.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CC_SAVED_MODEL_SIGNATURE_CONSTANTS_H_ // #define TENSORFLOW_CC_SAVED_MODEL_SIGNATURE_CONSTANTS_H_ /** Key in the signature def map for {@code default} serving signatures. The default * signature is used in inference requests where a specific signature was not * specified. 
*/ @Namespace("tensorflow") @MemberGetter public static native byte kDefaultServingSignatureDefKey(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kDefaultServingSignatureDefKey(); //////////////////////////////////////////////////////////////////////////////// /** Classification API constants.

* Classification inputs. */ @Namespace("tensorflow") @MemberGetter public static native byte kClassifyInputs(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kClassifyInputs(); /** Classification method name used in a SignatureDef. */ @Namespace("tensorflow") @MemberGetter public static native byte kClassifyMethodName(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kClassifyMethodName(); /** Classification classes output. */ @Namespace("tensorflow") @MemberGetter public static native byte kClassifyOutputClasses(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kClassifyOutputClasses(); /** Classification scores output. */ @Namespace("tensorflow") @MemberGetter public static native byte kClassifyOutputScores(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kClassifyOutputScores(); //////////////////////////////////////////////////////////////////////////////// /** Predict API constants.

* Predict inputs. */ @Namespace("tensorflow") @MemberGetter public static native byte kPredictInputs(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kPredictInputs(); /** Predict method name used in a SignatureDef. */ @Namespace("tensorflow") @MemberGetter public static native byte kPredictMethodName(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kPredictMethodName(); /** Predict outputs. */ @Namespace("tensorflow") @MemberGetter public static native byte kPredictOutputs(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kPredictOutputs(); //////////////////////////////////////////////////////////////////////////////// /** Regression API constants.

* Regression inputs. */ @Namespace("tensorflow") @MemberGetter public static native byte kRegressInputs(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kRegressInputs(); /** Regression method name used in a SignatureDef. */ @Namespace("tensorflow") @MemberGetter public static native byte kRegressMethodName(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kRegressMethodName(); /** Regression outputs. */ @Namespace("tensorflow") @MemberGetter public static native byte kRegressOutputs(int i); @Namespace("tensorflow") @MemberGetter public static native @Cast("const char*") BytePointer kRegressOutputs(); //////////////////////////////////////////////////////////////////////////////// // namespace tensorflow // #endif // TENSORFLOW_CC_SAVED_MODEL_SIGNATURE_CONSTANTS_H_ // Parsed from tensorflow/cc/ops/standard_ops.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // #ifndef TENSORFLOW_CC_OPS_STANDARD_OPS_H_ // #define TENSORFLOW_CC_OPS_STANDARD_OPS_H_ // #include "tensorflow/cc/ops/array_ops.h" // #include "tensorflow/cc/ops/candidate_sampling_ops.h" // #include "tensorflow/cc/ops/const_op.h" // #include "tensorflow/cc/ops/control_flow_ops.h" // #include "tensorflow/cc/ops/data_flow_ops.h" // #include "tensorflow/cc/ops/image_ops.h" // #include "tensorflow/cc/ops/io_ops.h" // #include "tensorflow/cc/ops/linalg_ops.h" // #include "tensorflow/cc/ops/logging_ops.h" // #include "tensorflow/cc/ops/lookup_ops.h" // #include "tensorflow/cc/ops/math_ops.h" // #include "tensorflow/cc/ops/nn_ops.h" // #include "tensorflow/cc/ops/no_op.h" // #include "tensorflow/cc/ops/parsing_ops.h" // #include "tensorflow/cc/ops/random_ops.h" // #include "tensorflow/cc/ops/sparse_ops.h" // #include "tensorflow/cc/ops/state_ops.h" // #include "tensorflow/cc/ops/string_ops.h" // #include "tensorflow/cc/ops/training_ops.h" // #include "tensorflow/cc/ops/user_ops.h" // #endif // TENSORFLOW_CC_OPS_STANDARD_OPS_H_ // Parsed from tensorflow/cc/ops/const_op.h /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ // #ifndef TENSORFLOW_CC_OPS_CONST_OP_H_ // #define TENSORFLOW_CC_OPS_CONST_OP_H_ // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/graph/node_builder.h" /** \defgroup const_op Const Op * \{ */ @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @Const @ByRef Input.Initializer val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @ByRef Tensor val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, byte val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, short val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, int val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, long val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, float val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, double val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, boolean val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @StdString String val); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @StdString BytePointer val); @Namespace("tensorflow::ops") public static native @ByVal Output ConstFromProto(@Const @ByRef Scope scope, @Const @ByRef TensorProto proto); @Namespace("tensorflow::ops") public static native @ByVal NodeBuilder.NodeOut AsNodeOut(@Const @ByRef Scope scope, @Const @ByRef Input inp); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @Cast("const unsigned char") byte v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, short v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, int v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @Cast("const long long") long v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, float v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, double v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @Cast("const bool") boolean v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @StdString BytePointer v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal Output Const(@Const @ByRef Scope scope, @StdString String v, @Const @ByVal TensorShape shape); @Namespace("tensorflow::ops") public static native @ByVal NodeOutVector AsNodeOutList(@Const @ByRef Scope scope, @Const @ByRef InputList inp); /** }\ */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_CONST_OP_H_ // Parsed from tensorflow/cc/ops/array_ops.h // 
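// --- Editor's illustrative sketch (not part of the generated bindings) -------
// Exercises a few of the Const factories declared in const_op.h above: a
// scalar constant, a shape-filled constant, and a string constant. Nothing is
// executed; the nodes are only added to the scope's graph. The varargs
// TensorShape constructor is assumed from the JavaCPP helper classes.
public static void constOpSketch() {
    Scope scope = Scope.NewRootScope();
    Output answer = Const(scope, 42);                          // 0-D int32
    Output ones   = Const(scope, 1.0f, new TensorShape(2, 3)); // 2x3 filled with 1.0f
    Output label  = Const(scope, "hello");                     // 0-D string
}
// ------------------------------------------------------------------------------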
This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_ARRAY_OPS_H_ // #define TENSORFLOW_CC_OPS_ARRAY_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup array_ops Array Ops * \{

* BatchToSpace for 4-D tensors of type T. * * This is a legacy version of the more general BatchToSpaceND. * * Rearranges (permutes) data from batch into blocks of spatial data, followed by * cropping. This is the reverse transformation of SpaceToBatch. More specifically, * this op outputs a copy of the input tensor where values from the {@code batch} * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions, * followed by cropping along the {@code height} and {@code width} dimensions. * * Arguments: * * scope: A Scope object * * input: 4-D tensor with shape * {@code [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, * depth]}. Note that the batch size of the input tensor must be divisible by * {@code block_size * block_size}. * * crops: 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies * how many elements to crop from the intermediate result across the spatial * dimensions as follows: * * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] * * Returns: * * {@code Output}: 4-D with shape {@code [batch, height, width, depth]}, where: * * height = height_pad - crop_top - crop_bottom * width = width_pad - crop_left - crop_right * * The attr {@code block_size} must be greater than one. It indicates the block size. * * Some examples: * * (1) For the following input of shape {@code [4, 1, 1, 1]} and block_size of 2: * *

 *  <pre>{@code
 *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
 *  }</pre>
* * The output tensor has shape {@code [1, 2, 2, 1]} and value: * *
 *  <pre>{@code
 *  x = [[[[1], [2]], [[3], [4]]]]
 *  }</pre>
* * (2) For the following input of shape {@code [4, 1, 1, 3]} and block_size of 2: * *
 *  <pre>{@code
 *  [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
 *  }</pre>
* * The output tensor has shape {@code [1, 2, 2, 3]} and value: * *
 *  <pre>{@code
 *  x = [[[[1, 2, 3], [4, 5, 6]],
 *        [[7, 8, 9], [10, 11, 12]]]]
 *  }</pre>
* * (3) For the following input of shape {@code [4, 2, 2, 1]} and block_size of 2: * *
 *  <pre>{@code
 *  x = [[[[1], [3]], [[9], [11]]],
 *       [[[2], [4]], [[10], [12]]],
 *       [[[5], [7]], [[13], [15]]],
 *       [[[6], [8]], [[14], [16]]]]
 *  }</pre>
* * The output tensor has shape {@code [1, 4, 4, 1]} and value: * *
 *  <pre>{@code
 *  x = [[[1],   [2],  [3],  [4]],
 *       [[5],   [6],  [7],  [8]],
 *       [[9],  [10], [11],  [12]],
 *       [[13], [14], [15],  [16]]]
 *  }</pre>
* * (4) For the following input of shape {@code [8, 1, 2, 1]} and block_size of 2: * *
 *  <pre>{@code
 *  x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
 *       [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
 *  }</pre>
* * The output tensor has shape {@code [2, 2, 4, 1]} and value: * *
 *  <pre>{@code
 *  x = [[[[1],   [2],  [3],  [4]],
 *        [[5],   [6],  [7],  [8]]],
 *       [[[9],  [10], [11],  [12]],
 *        [[13], [14], [15],  [16]]]]
 *  }</pre>
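 *
 *  Editor's illustrative sketch (not part of the generated bindings):
 *  building example (1) above. The Tensor.create overloads and the varargs
 *  TensorShape constructor are assumed from the JavaCPP helper classes.
 *
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  Output input = Const(scope, Tensor.create(new float[] {1, 2, 3, 4},
 *                                            new TensorShape(4, 1, 1, 1)));
 *  Output crops = Const(scope, Tensor.create(new int[] {0, 0, 0, 0},
 *                                            new TensorShape(2, 2)));
 *  BatchToSpace b2s = new BatchToSpace(scope, new Input(input), new Input(crops), 2);
 *  }</pre>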
*/ @Namespace("tensorflow::ops") @NoOffset public static class BatchToSpace extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchToSpace(Pointer p) { super(p); } public BatchToSpace(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input crops, @Cast("tensorflow::int64") long block_size) { super((Pointer)null); allocate(scope, input, crops, block_size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input crops, @Cast("tensorflow::int64") long block_size); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native BatchToSpace operation(Operation operation); public native @ByRef Output output(); public native BatchToSpace output(Output output); } /** BatchToSpace for N-D tensors of type T. * * This operation reshapes the "batch" dimension 0 into {@code M + 1} dimensions of shape * {@code block_shape + [batch]}, interleaves these blocks back into the grid defined by * the spatial dimensions {@code [1, ..., M]}, to obtain a result with the same rank as * the input. The spatial dimensions of this intermediate result are then * optionally cropped according to {@code crops} to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. * * Arguments: * * scope: A Scope object * * input: N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape}, * where spatial_shape has M dimensions. * * block_shape: 1-D with shape {@code [M]}, all values must be >= 1. * * crops: 2-D with shape {@code [M, 2]}, all values must be >= 0. * {@code crops[i] = [crop_start, crop_end]} specifies the amount to crop from input * dimension {@code i + 1}, which corresponds to spatial dimension {@code i}. It is * required that * {@code crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]}. * * This operation is equivalent to the following steps: * * 1. Reshape {@code input} to {@code reshaped} of shape: * [block_shape[0], ..., block_shape[M-1], * batch / prod(block_shape), * input_shape[1], ..., input_shape[N-1]] * * 2. Permute dimensions of {@code reshaped} to produce {@code permuted} of shape * [batch / prod(block_shape), * * input_shape[1], block_shape[0], * ..., * input_shape[M], block_shape[M-1], * * input_shape[M+1], ..., input_shape[N-1]] * * 3. Reshape {@code permuted} to produce {@code reshaped_permuted} of shape * [batch / prod(block_shape), * * input_shape[1] * block_shape[0], * ..., * input_shape[M] * block_shape[M-1], * * input_shape[M+1], * ..., * input_shape[N-1]] * * 4. Crop the start and end of dimensions {@code [1, ..., M]} of * {@code reshaped_permuted} according to {@code crops} to produce the output of shape: * [batch / prod(block_shape), * * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], * ..., * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], * * input_shape[M+1], ..., input_shape[N-1]] * * Some examples: * * (1) For the following input of shape {@code [4, 1, 1, 1]}, {@code block_shape = [2, 2]}, and * {@code crops = [[0, 0], [0, 0]]}: * *
 *  <pre>{@code
 *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
 *  }</pre>
* * The output tensor has shape {@code [1, 2, 2, 1]} and value: * *
 *  <pre>{@code
 *  x = [[[[1], [2]], [[3], [4]]]]
 *  }</pre>
* * (2) For the following input of shape {@code [4, 1, 1, 3]}, {@code block_shape = [2, 2]}, and * {@code crops = [[0, 0], [0, 0]]}: * *
 *  <pre>{@code
 *  [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
 *  }</pre>
* * The output tensor has shape {@code [1, 2, 2, 3]} and value: * *
 *  <pre>{@code
 *  x = [[[[1, 2, 3], [4, 5, 6]],
 *        [[7, 8, 9], [10, 11, 12]]]]
 *  }</pre>
* * (3) For the following input of shape {@code [4, 2, 2, 1]}, {@code block_shape = [2, 2]}, and * {@code crops = [[0, 0], [0, 0]]}: * *
 *  <pre>{@code
 *  x = [[[[1], [3]], [[9], [11]]],
 *       [[[2], [4]], [[10], [12]]],
 *       [[[5], [7]], [[13], [15]]],
 *       [[[6], [8]], [[14], [16]]]]
 *  }</pre>
* * The output tensor has shape {@code [1, 4, 4, 1]} and value: * *
 *  <pre>{@code
 *  x = [[[1],   [2],  [3],  [4]],
 *       [[5],   [6],  [7],  [8]],
 *       [[9],  [10], [11],  [12]],
 *       [[13], [14], [15],  [16]]]
 *  }</pre>
* * (4) For the following input of shape {@code [8, 1, 3, 1]}, {@code block_shape = [2, 2]}, and * {@code crops = [[0, 0], [2, 0]]}: * *
 *  <pre>{@code
 *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
 *       [[[0], [2], [4]]], [[[0], [10], [12]]],
 *       [[[0], [5], [7]]], [[[0], [13], [15]]],
 *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
 *  }</pre>
* * The output tensor has shape {@code [2, 2, 4, 1]} and value: * *
 *  <pre>{@code
 *  x = [[[[1],   [2],  [3],  [4]],
 *        [[5],   [6],  [7],  [8]]],
 *       [[[9],  [10], [11],  [12]],
 *        [[13], [14], [15],  [16]]]]
 *  }</pre>
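 *
 *  Editor's illustrative sketch (not part of the generated bindings): unlike
 *  BatchToSpace above, block_shape and crops are graph inputs here, so they
 *  could also be fed dynamically. {@code scope} and {@code batch} stand for a
 *  Scope and a conforming [4, 1, 1, 1] input built elsewhere; the helper
 *  overloads are assumed as in the BatchToSpace sketch.
 *
 *  <pre>{@code
 *  Output blockShape = Const(scope, Tensor.create(new int[] {2, 2},
 *                                                 new TensorShape(2)));
 *  Output crops = Const(scope, Tensor.create(new int[] {0, 0, 0, 0},
 *                                            new TensorShape(2, 2)));
 *  BatchToSpaceND nd = new BatchToSpaceND(scope, new Input(batch),
 *                                         new Input(blockShape), new Input(crops));
 *  }</pre>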
* * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class BatchToSpaceND extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchToSpaceND(Pointer p) { super(p); } public BatchToSpaceND(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input block_shape, @ByVal Input crops) { super((Pointer)null); allocate(scope, input, block_shape, crops); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input block_shape, @ByVal Input crops); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native BatchToSpaceND operation(Operation operation); public native @ByRef Output output(); public native BatchToSpaceND output(Output output); } /** Bitcasts a tensor from one type to another without copying data. * * Given a tensor {@code input}, this operation returns a tensor that has the same buffer * data as {@code input} with datatype {@code type}. * * If the input datatype {@code T} is larger than the output datatype {@code type} then the * shape changes from [...] to [..., sizeof({@code T})/sizeof({@code type})]. * * If {@code T} is smaller than {@code type}, the operator requires that the rightmost * dimension be equal to sizeof({@code type})/sizeof({@code T}). The shape then goes from * [..., sizeof({@code type})/sizeof({@code T})] to [...]. * * *NOTE*: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Bitcast extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Bitcast(Pointer p) { super(p); } public Bitcast(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::DataType") int type) { super((Pointer)null); allocate(scope, input, type); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::DataType") int type); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Bitcast operation(Operation operation); public native @ByRef Output output(); public native Bitcast output(Output output); } /** Return the shape of s0 op s1 with broadcast. * * Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the * broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The r0 tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class BroadcastDynamicShape extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
/** Return the shape of s0 op s1 with broadcast.
 *
 *  Given {@code s0} and {@code s1}, tensors that represent shapes, compute {@code r0}, the
 *  broadcasted shape. {@code s0}, {@code s1} and {@code r0} are all integer vectors.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The r0 tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class BroadcastDynamicShape extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public BroadcastDynamicShape(Pointer p) { super(p); }

    public BroadcastDynamicShape(@Const @ByRef Scope scope, @ByVal Input s0, @ByVal Input s1) { super((Pointer)null); allocate(scope, s0, s1); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input s0, @ByVal Input s1);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native BroadcastDynamicShape operation(Operation operation);
    public native @ByRef Output r0(); public native BroadcastDynamicShape r0(Output r0);
}

/** Broadcast an array for a compatible shape.
 *
 *  Broadcasting is the process of making arrays have compatible shapes
 *  for arithmetic operations. Two shapes are compatible if, for each
 *  dimension pair, they are either equal or one of them is one. When trying
 *  to broadcast a Tensor to a shape, it starts with the trailing dimensions
 *  and works its way forward.
 *
 *  For example,
 *
{@code
 *  >>> x = tf.constant([1, 2, 3])
 *  >>> y = tf.broadcast_to(x, [3, 3])
 *  >>> sess.run(y)
 *  array([[1, 2, 3],
 *         [1, 2, 3],
 *         [1, 2, 3]], dtype=int32)
 *  }
 *  In the above example, the input Tensor with the shape of {@code [3]}
 *  is broadcast to the output Tensor with a shape of {@code [3, 3]}.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: A Tensor to broadcast.
 *  * shape: A 1-D {@code int} Tensor. The shape of the desired output.
 *
 *  Returns:
 *  * {@code Output}: A Tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class BroadcastTo extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public BroadcastTo(Pointer p) { super(p); }

    public BroadcastTo(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input shape) { super((Pointer)null); allocate(scope, input, shape); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input shape);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native BroadcastTo operation(Operation operation);
    public native @ByRef Output output(); public native BroadcastTo output(Output output);
}
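
// Illustrative usage sketch (not generated): building a BroadcastTo node with the
// wrapper above. The helper name `broadcastExample` is ours; `input` and `shape`
// are assumed to be produced by other ops (e.g. constants) elsewhere in the graph.
public static Output broadcastExample(Scope scope, Input input, Input shape) {
    BroadcastTo broadcast = new BroadcastTo(scope, input, shape);
    // The symbolic result can feed further ops via asOutput()/asInput().
    return broadcast.output();
}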
/** Checks a tensor for NaN and Inf values.
 *
 *  When run, reports an {@code InvalidArgument} error if {@code tensor} has any values
 *  that are not a number (NaN) or infinity (Inf). Otherwise, passes {@code tensor} as-is.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * message: Prefix of the error message.
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class CheckNumerics extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CheckNumerics(Pointer p) { super(p); }

    public CheckNumerics(@Const @ByRef Scope scope, @ByVal Input tensor, @StringPiece BytePointer message) { super((Pointer)null); allocate(scope, tensor, message); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor, @StringPiece BytePointer message);
    public CheckNumerics(@Const @ByRef Scope scope, @ByVal Input tensor, @StringPiece String message) { super((Pointer)null); allocate(scope, tensor, message); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor, @StringPiece String message);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native CheckNumerics operation(Operation operation);
    public native @ByRef Output output(); public native CheckNumerics output(Output output);
}

/** Concatenates tensors along one dimension.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * values: List of {@code N} Tensors to concatenate. Their ranks and types must match,
 *  and their sizes must match in all dimensions except {@code concat_dim}.
 *  * axis: 0-D. The dimension along which to concatenate. Must be in the
 *  range [-rank(values), rank(values)).
 *
 *  Returns:
 *  * {@code Output}: A {@code Tensor} with the concatenation of values stacked along the
 *  {@code concat_dim} dimension. This tensor's shape matches that of {@code values} except
 *  in {@code concat_dim} where it has the sum of the sizes. */
@Namespace("tensorflow::ops") @NoOffset public static class Concat extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Concat(Pointer p) { super(p); }

    public Concat(@Const @ByRef Scope scope, @ByVal InputList values, @ByVal Input axis) { super((Pointer)null); allocate(scope, values, axis); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal InputList values, @ByVal Input axis);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Concat operation(Operation operation);
    public native @ByRef Output output(); public native Concat output(Output output);
}
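
// Illustrative usage sketch (not generated): adding a Concat node. The helper name is
// ours, and the InputList/Input arguments are assumed to be assembled by the caller
// from upstream ops; how the InputList is built varies, so it is left as a parameter.
public static Output concatExample(Scope scope, InputList values, Input axis) {
    Concat concat = new Concat(scope, values, axis);
    // output() is the concatenated symbolic tensor declared by the wrapper above.
    return concat.output();
}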
*/ @Namespace("tensorflow::ops") @NoOffset public static class DebugGradientRefIdentity extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DebugGradientRefIdentity(Pointer p) { super(p); } public DebugGradientRefIdentity(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DebugGradientRefIdentity operation(Operation operation); public native @ByRef Output output(); public native DebugGradientRefIdentity output(Output output); } /** Makes a copy of {@code x}. * * Arguments: * * scope: A Scope object * * x: The source tensor of type {@code T}. * * Returns: * * {@code Output}: y: A {@code Tensor} of type {@code T}. A copy of {@code x}. Guaranteed that {@code y} * is not an alias of {@code x}. */ @Namespace("tensorflow::ops") @NoOffset public static class DeepCopy extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeepCopy(Pointer p) { super(p); } public DeepCopy(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DeepCopy operation(Operation operation); public native @ByRef Output y(); public native DeepCopy y(Output y); } /** DepthToSpace for tensors of type T. * * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. More specifically, * this op outputs a copy of the input tensor where values from the {@code depth} * dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions. * The attr {@code block_size} indicates the input block size and how the data is moved. * * * Chunks of data of size {@code block_size * block_size} from depth are rearranged * into non-overlapping blocks of size {@code block_size x block_size} * * The width the output tensor is {@code input_depth * block_size}, whereas the * height is {@code input_height * block_size}. * * The Y, X coordinates within each block of the output image are determined * by the high order component of the input channel index. * * The depth of the input tensor must be divisible by * {@code block_size * block_size}. * * The {@code data_format} attr specifies the layout of the input and output tensors * with the following options: * "NHWC": {@code [ batch, height, width, channels ]} * "NCHW": {@code [ batch, channels, height, width ]} * "NCHW_VECT_C": * {@code qint8 [ batch, channels / 4, height, width, 4 ]} * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, * ordered by decreasing memory layout significance as: * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates * within the input image, bX, bY means coordinates * within the output block, oC means output channels). 
/** DepthToSpace for tensors of type T.
 *
 *  Rearranges data from depth into blocks of spatial data.
 *  This is the reverse transformation of SpaceToDepth. More specifically,
 *  this op outputs a copy of the input tensor where values from the {@code depth}
 *  dimension are moved in spatial blocks to the {@code height} and {@code width} dimensions.
 *  The attr {@code block_size} indicates the input block size and how the data is moved.
 *
 *  * Chunks of data of size {@code block_size * block_size} from depth are rearranged
 *  into non-overlapping blocks of size {@code block_size x block_size}
 *  * The width of the output tensor is {@code input_width * block_size}, whereas the
 *  height is {@code input_height * block_size}.
 *  * The Y, X coordinates within each block of the output image are determined
 *  by the high order component of the input channel index.
 *  * The depth of the input tensor must be divisible by
 *  {@code block_size * block_size}.
 *
 *  The {@code data_format} attr specifies the layout of the input and output tensors
 *  with the following options:
 *    "NHWC": {@code [ batch, height, width, channels ]}
 *    "NCHW": {@code [ batch, channels, height, width ]}
 *    "NCHW_VECT_C": {@code qint8 [ batch, channels / 4, height, width, 4 ]}
 *
 *  It is useful to consider the operation as transforming a 6-D Tensor.
 *  e.g. for data_format = NHWC, each element in the input tensor can be
 *  specified via 6 coordinates, ordered by decreasing memory layout
 *  significance as: n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y
 *  coordinates within the input image, bX, bY means coordinates within the
 *  output block, oC means output channels).
 *  The output would be the input transposed to the following layout:
 *  n,iY,bY,iX,bX,oC
 *
 *  This operation is useful for resizing the activations between convolutions
 *  (but keeping all data), e.g. instead of pooling. It is also useful for training
 *  purely convolutional models.
 *
 *  For example, given an input of shape {@code [1, 1, 1, 4]}, data_format = "NHWC" and
 *  block_size = 2:
 *
{@code
 *  x = [[[[1, 2, 3, 4]]]]
 * 
 *  }
 *
 *  This operation will output a tensor of shape {@code [1, 2, 2, 1]}:
 *
{@code
 *     [[[[1], [2]],
 *       [[3], [4]]]]
 *  }
 *
 *  Here, the input has a batch of 1 and each batch element has shape {@code [1, 1, 4]},
 *  the corresponding output will have 2x2 elements and will have a depth of
 *  1 channel (1 = {@code 4 / (block_size * block_size)}).
 *  The output element shape is {@code [2, 2, 1]}.
 *
 *  For an input tensor with larger depth, here of shape {@code [1, 1, 1, 12]}, e.g.
 *
{@code
 *  x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
 *  }
 *
 *  This operation, for block size of 2, will return the following tensor of shape
 *  {@code [1, 2, 2, 3]}:
 *
{@code
 *     [[[[1, 2, 3], [4, 5, 6]],
 *       [[7, 8, 9], [10, 11, 12]]]]
 * 
 *  }
 *
 *  Similarly, for the following input of shape {@code [1 2 2 4]}, and a block size of 2:
 *
{@code
 *  x =  [[[[1, 2, 3, 4],
 *         [5, 6, 7, 8]],
 *        [[9, 10, 11, 12],
 *         [13, 14, 15, 16]]]]
 *  }
 *
 *  the operator will return the following tensor of shape {@code [1 4 4 1]}:
 *
{@code
 *  x = [[[ [1],   [2],  [5],  [6]],
 *        [ [3],   [4],  [7],  [8]],
 *        [ [9],  [10], [13],  [14]],
 *        [ [11], [12], [15],  [16]]]]
 * 
 *  }
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * block_size: The size of the spatial block, same as in Space2Depth.
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class DepthToSpace extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DepthToSpace(Pointer p) { super(p); }

    /** Optional attribute setters for DepthToSpace */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to "NHWC" */
        public native @ByVal Attrs DataFormat(@StringPiece BytePointer x);
        public native @ByVal Attrs DataFormat(@StringPiece String x);

        public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_);
    }
    public DepthToSpace(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size) { super((Pointer)null); allocate(scope, input, block_size); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size);
    public DepthToSpace(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, block_size, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x);
    public static native @ByVal Attrs DataFormat(@StringPiece String x);

    public native @ByRef Operation operation(); public native DepthToSpace operation(Operation operation);
    public native @ByRef Output output(); public native DepthToSpace output(Output output);
}

/** Dequantize the 'input' tensor into a float Tensor.
 *
 *  [min_range, max_range] are scalar floats that specify the range for
 *  the 'input' data. The 'mode' attribute controls exactly which calculations are
 *  used to convert the float values to their quantized equivalents.
 *
 *  In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
 *
{@code
 *  if T == qint8, in[i] += (range(T) + 1)/ 2.0
 *  out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
 *  }
 *  here {@code range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()}
 *
 *  *MIN_COMBINED Mode Example*
 *
 *  If the input comes from a QuantizedRelu6, the output type is
 *  quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
 *  0-6. The min_range and max_range values are therefore 0.0 and 6.0.
 *  Dequantize on quint8 will take each value, cast to float, and multiply
 *  by 6 / 255.
 *  Note that if the quantized type is qint8, the operation will additionally add
 *  128 to each value prior to casting.
 *
 *  If the mode is 'MIN_FIRST', then this approach is used:
 *
{@code c++
 *  num_discrete_values = 1 << (# of bits in T)
 *  range_adjust = num_discrete_values / (num_discrete_values - 1)
 *  range = (range_max - range_min) * range_adjust
 *  range_scale = range / num_discrete_values
 *  const double offset_input = static_cast<double>(input) - lowest_quantized;
 *  result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
 *  }
 *
 *  *SCALED mode Example*
 *
 *  {@code SCALED} mode matches the quantization approach used in
 *  {@code QuantizeAndDequantize{V2|V3}}.
 *
 *  If the mode is {@code SCALED}, we do not use the full range of the output type,
 *  choosing to elide the lowest possible value for symmetry (e.g., output range is
 *  -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to 0.
 *
 *  We first find the range of values in our tensor. The
 *  range we use is always centered on 0, so we find m such that
{@code c++
 *    m = max(abs(input_min), abs(input_max))
 *  }
 *
 *  Our input tensor range is then {@code [-m, m]}.
 *
 *  Next, we choose our fixed-point quantization buckets, {@code [min_fixed, max_fixed]}.
 *  If T is signed, this is
{@code
 *    num_bits = sizeof(T) * 8
 *    [min_fixed, max_fixed] =
 *        [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
 *  }
 *
 *  Otherwise, if T is unsigned, the fixed-point range is
{@code
 *    [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
 *  }
 *
 *  From this we compute our scaling factor, s:
{@code c++
 *    s = (2 * m) / (max_fixed - min_fixed)
 *  }
 *
 *  Now we can dequantize the elements of our tensor:
{@code c++
 *  result = input * s
 *  }
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * min_range: The minimum scalar value possibly produced for the input.
 *  * max_range: The maximum scalar value possibly produced for the input.
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Dequantize extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Dequantize(Pointer p) { super(p); }

    /** Optional attribute setters for Dequantize */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to "MIN_COMBINED" */
        public native @ByVal Attrs Mode(@StringPiece BytePointer x);
        public native @ByVal Attrs Mode(@StringPiece String x);

        public native @StringPiece BytePointer mode_(); public native Attrs mode_(BytePointer mode_);
    }
    public Dequantize(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range) { super((Pointer)null); allocate(scope, input, min_range, max_range); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range);
    public Dequantize(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, min_range, max_range, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Mode(@StringPiece BytePointer x);
    public static native @ByVal Attrs Mode(@StringPiece String x);

    public native @ByRef Operation operation(); public native Dequantize operation(Operation operation);
    public native @ByRef Output output(); public native Dequantize output(Output output);
}

/** Returns a diagonal tensor with given diagonal values.
 *
 *  Given a {@code diagonal}, this operation returns a tensor with the {@code diagonal} and
 *  everything else padded with zeros. The diagonal is computed as follows:
 *
 *  Assume {@code diagonal} has dimensions [D1,..., Dk], then the output is a tensor of
 *  rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
 *
 *  {@code output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]} and 0 everywhere else.
 *
 *  For example:
 *
{@code
 *  # 'diagonal' is [1, 2, 3, 4]
 *  tf.diag(diagonal) ==> [[1, 0, 0, 0]
 *                         [0, 2, 0, 0]
 *                         [0, 0, 3, 0]
 *                         [0, 0, 0, 4]]
 *  }
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * diagonal: Rank k tensor where k is at most 1.
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Diag extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Diag(Pointer p) { super(p); }

    public Diag(@Const @ByRef Scope scope, @ByVal Input diagonal) { super((Pointer)null); allocate(scope, diagonal); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input diagonal);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Diag operation(Operation operation);
    public native @ByRef Output output(); public native Diag output(Output output);
}

/** Returns the diagonal part of the tensor.
 *
 *  This operation returns a tensor with the {@code diagonal} part
 *  of the {@code input}. The {@code diagonal} part is computed as follows:
 *
 *  Assume {@code input} has dimensions {@code [D1,..., Dk, D1,..., Dk]}, then the output is a
 *  tensor of rank {@code k} with dimensions {@code [D1,..., Dk]} where:
 *
 *  {@code diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]}.
 *
 *  For example:
 *
{@code
 *  # 'input' is [[1, 0, 0, 0]
 *                [0, 2, 0, 0]
 *                [0, 0, 3, 0]
 *                [0, 0, 0, 4]]
 * 
 *  tf.diag_part(input) ==> [1, 2, 3, 4]
 *  }
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: Rank k tensor where k is even and not zero.
 *
 *  Returns:
 *  * {@code Output}: The extracted diagonal. */
@Namespace("tensorflow::ops") @NoOffset public static class DiagPart extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DiagPart(Pointer p) { super(p); }

    public DiagPart(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native DiagPart operation(Operation operation);
    public native @ByRef Output diagonal(); public native DiagPart diagonal(Output diagonal);
}
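
// Illustrative usage sketch (not generated): extracting the diagonal of a rank-2k
// tensor. The helper name is ours; `matrix` is assumed to come from another op.
public static Output diagPartExample(Scope scope, Input matrix) {
    DiagPart part = new DiagPart(scope, matrix);
    return part.diagonal(); // the extracted diagonal, per the wrapper above
}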
/** Computes the (possibly normalized) Levenshtein Edit Distance.
 *
 *  The inputs are variable-length sequences provided by SparseTensors
 *    (hypothesis_indices, hypothesis_values, hypothesis_shape)
 *  and
 *    (truth_indices, truth_values, truth_shape).
 *
 *  The inputs are:
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * hypothesis_indices: The indices of the hypothesis list SparseTensor.
 *  This is an N x R int64 matrix.
 *  * hypothesis_values: The values of the hypothesis list SparseTensor.
 *  This is an N-length vector.
 *  * hypothesis_shape: The shape of the hypothesis list SparseTensor.
 *  This is an R-length vector.
 *  * truth_indices: The indices of the truth list SparseTensor.
 *  This is an M x R int64 matrix.
 *  * truth_values: The values of the truth list SparseTensor.
 *  This is an M-length vector.
 *  * truth_shape: truth indices, vector.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * normalize: boolean (if true, edit distances are normalized by length of truth).
 *
 *  The output is:
 *
 *  Returns:
 *  * {@code Output}: A dense float tensor with rank R - 1.
 *
 *  For the example input:
 *
 *     // hypothesis represents a 2x1 matrix with variable-length values:
 *     //   (0,0) = ["a"]
 *     //   (1,0) = ["b"]
 *     hypothesis_indices = [[0, 0, 0],
 *                           [1, 0, 0]]
 *     hypothesis_values = ["a", "b"]
 *     hypothesis_shape = [2, 1, 1]
 *
 *     // truth represents a 2x2 matrix with variable-length values:
 *     //   (0,0) = []
 *     //   (0,1) = ["a"]
 *     //   (1,0) = ["b", "c"]
 *     //   (1,1) = ["a"]
 *     truth_indices = [[0, 1, 0],
 *                      [1, 0, 0],
 *                      [1, 0, 1],
 *                      [1, 1, 0]]
 *     truth_values = ["a", "b", "c", "a"]
 *     truth_shape = [2, 2, 2]
 *     normalize = true
 *
 *  The output will be:
 *
 *     // output is a 2x2 matrix with edit distances normalized by truth lengths.
 *     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
 *               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis */
@Namespace("tensorflow::ops") @NoOffset public static class EditDistance extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EditDistance(Pointer p) { super(p); }

    /** Optional attribute setters for EditDistance */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** boolean (if true, edit distances are normalized by length of truth).
         *
         *  Defaults to true */
        public native @ByVal Attrs Normalize(@Cast("bool") boolean x);

        public native @Cast("bool") boolean normalize_(); public native Attrs normalize_(boolean normalize_);
    }
    public EditDistance(@Const @ByRef Scope scope, @ByVal Input hypothesis_indices, @ByVal Input hypothesis_values, @ByVal Input hypothesis_shape, @ByVal Input truth_indices, @ByVal Input truth_values, @ByVal Input truth_shape) { super((Pointer)null); allocate(scope, hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input hypothesis_indices, @ByVal Input hypothesis_values, @ByVal Input hypothesis_shape, @ByVal Input truth_indices, @ByVal Input truth_values, @ByVal Input truth_shape);
    public EditDistance(@Const @ByRef Scope scope, @ByVal Input hypothesis_indices, @ByVal Input hypothesis_values, @ByVal Input hypothesis_shape, @ByVal Input truth_indices, @ByVal Input truth_values, @ByVal Input truth_shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input hypothesis_indices, @ByVal Input hypothesis_values, @ByVal Input hypothesis_shape, @ByVal Input truth_indices, @ByVal Input truth_values, @ByVal Input truth_shape, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Normalize(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native EditDistance operation(Operation operation);
    public native @ByRef Output output(); public native EditDistance output(Output output);
}

/** Creates a tensor with the given shape.
 *
 *  This operation creates a tensor of {@code shape} and {@code dtype}.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * shape: 1-D. Represents the shape of the output tensor.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * init: If True, initialize the returned tensor with the default value of dtype.
 *  Otherwise, the implementation is free not to initialize the tensor's content.
 *
 *  Returns:
 *  * {@code Output}: A {@code Tensor} of type {@code T}. */
@Namespace("tensorflow::ops") @NoOffset public static class Empty extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Empty(Pointer p) { super(p); }

    /** Optional attribute setters for Empty */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If True, initialize the returned tensor with the default value of dtype.
         *  Otherwise, the implementation is free not to initialize the tensor's content.
         *
         *  Defaults to false */
        public native @ByVal Attrs Init(@Cast("bool") boolean x);

        public native @Cast("bool") boolean init_(); public native Attrs init_(boolean init_);
    }
    public Empty(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, shape, dtype); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype);
    public Empty(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, dtype, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Init(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native Empty operation(Operation operation);
    public native @ByRef Output output(); public native Empty output(Output output);
}
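
// Illustrative usage sketch (not generated): Empty with the Init attribute set so the
// result is default-initialized rather than left undefined. The helper name is ours.
public static Output emptyExample(Scope scope, Input shape) {
    // Empty.Init(true) is the static Attrs factory declared in the wrapper above.
    Empty empty = new Empty(scope, shape, DT_FLOAT, Empty.Init(true));
    return empty.output();
}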
/** Ensures that the tensor's shape matches the expected shape.
 *
 *  Raises an error if the input tensor's shape does not match the specified shape.
 *  Returns the input tensor otherwise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: A tensor, whose shape is to be validated.
 *  * shape: The expected (possibly partially specified) shape of the input tensor.
 *
 *  Returns:
 *  * {@code Output}: A tensor with the same shape and contents as the input tensor or value. */
@Namespace("tensorflow::ops") @NoOffset public static class EnsureShape extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public EnsureShape(Pointer p) { super(p); }

    public EnsureShape(@Const @ByRef Scope scope, @ByVal Input input, @ByVal PartialTensorShape shape) { super((Pointer)null); allocate(scope, input, shape); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal PartialTensorShape shape);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native EnsureShape operation(Operation operation);
    public native @ByRef Output output(); public native EnsureShape output(Output output);
}

/** Inserts a dimension of 1 into a tensor's shape.
 *
 *  Given a tensor {@code input}, this operation inserts a dimension of 1 at the
 *  dimension index {@code axis} of {@code input}'s shape. The dimension index {@code axis} starts at
 *  zero; if you specify a negative number for {@code axis} it is counted backward from
 *  the end.
 *
 *  This operation is useful if you want to add a batch dimension to a single
 *  element. For example, if you have a single image of shape {@code [height, width,
 *  channels]}, you can make it a batch of 1 image with {@code expand_dims(image, 0)},
 *  which will make the shape {@code [1, height, width, channels]}.
 *
 *  Other examples:
 *
{@code
 *  # 't' is a tensor of shape [2]
 *  shape(expand_dims(t, 0)) ==> [1, 2]
 *  shape(expand_dims(t, 1)) ==> [2, 1]
 *  shape(expand_dims(t, -1)) ==> [2, 1]
 * 
 *  # 't2' is a tensor of shape [2, 3, 5]
 *  shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
 *  shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
 *  shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
 *  }
 *
 *  This operation requires that:
 *
 *  {@code -1-input.dims() <= dim <= input.dims()}
 *
 *  This operation is related to {@code squeeze()}, which removes dimensions of
 *  size 1.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * axis: 0-D (scalar). Specifies the dimension index at which to
 *  expand the shape of {@code input}. Must be in the range
 *  {@code [-rank(input) - 1, rank(input)]}.
 *
 *  Returns:
 *  * {@code Output}: Contains the same data as {@code input}, but its shape has an additional
 *  dimension of size 1 added. */
@Namespace("tensorflow::ops") @NoOffset public static class ExpandDims extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ExpandDims(Pointer p) { super(p); }

    public ExpandDims(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native ExpandDims operation(Operation operation);
    public native @ByRef Output output(); public native ExpandDims output(Output output);
}

/** Extract {@code patches} from {@code images} and put them in the "depth" output dimension.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * images: 4-D Tensor with shape {@code [batch, in_rows, in_cols, depth]}.
 *  * ksizes: The size of the sliding window for each dimension of {@code images}.
 *  * strides: 1-D of length 4. How far the centers of two consecutive patches are in
 *  the images. Must be: {@code [1, stride_rows, stride_cols, 1]}.
 *  * rates: 1-D of length 4. Must be: {@code [1, rate_rows, rate_cols, 1]}. This is the
 *  input stride, specifying how far two consecutive patch samples are in the
 *  input. Equivalent to extracting patches with
 *  {@code patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)}, followed by
 *  subsampling them spatially by a factor of {@code rates}. This is equivalent to
 *  {@code rate} in dilated (a.k.a. Atrous) convolutions.
 *  * padding: The type of padding algorithm to use.
 *
 *  We specify the size-related attributes as:
 *
{@code python
 *        ksizes = [1, ksize_rows, ksize_cols, 1]
 *        strides = [1, strides_rows, strides_cols, 1]
 *        rates = [1, rates_rows, rates_cols, 1]
 *  }
 *
 *  Returns:
 *  * {@code Output}: 4-D Tensor with shape {@code [batch, out_rows, out_cols, ksize_rows *
 *  ksize_cols * depth]} containing image patches with size
 *  {@code ksize_rows x ksize_cols x depth} vectorized in the "depth" dimension. Note
 *  {@code out_rows} and {@code out_cols} are the dimensions of the output patches. */
@Namespace("tensorflow::ops") @NoOffset public static class ExtractImagePatches extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ExtractImagePatches(Pointer p) { super(p); }

    public ExtractImagePatches(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, images, ksizes, strides, rates, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding);
    public ExtractImagePatches(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, images, ksizes, strides, rates, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding);
    public ExtractImagePatches(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, images, ksizes, strides, rates, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding);
    public ExtractImagePatches(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, images, ksizes, strides, rates, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding);
    public ExtractImagePatches(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, images, ksizes, strides, rates, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding);
    public ExtractImagePatches(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, images, ksizes, strides, rates, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native ExtractImagePatches operation(Operation operation);
    public native @ByRef Output patches(); public native ExtractImagePatches patches(Output patches);
}
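
// Illustrative usage sketch (not generated): 3x3 patches at stride 1 with no dilation,
// using the int[] constructor overload declared above. Helper name and values are ours.
public static Output extractPatchesExample(Scope scope, Input images) {
    int[] ksizes  = {1, 3, 3, 1}; // [1, ksize_rows, ksize_cols, 1]
    int[] strides = {1, 1, 1, 1};
    int[] rates   = {1, 1, 1, 1};
    ExtractImagePatches op = new ExtractImagePatches(scope, images, ksizes, strides, rates, "SAME");
    return op.patches();
}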
tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ExtractImagePatches operation(Operation operation); public native @ByRef Output patches(); public native ExtractImagePatches patches(Output patches); } /** Extract {@code patches} from {@code input} and put them in the "depth" output * dimension. 3D extension of {@code extract_image_patches}. * * Arguments: * * scope: A Scope object * * input: 5-D Tensor with shape {@code [batch, in_planes, in_rows, in_cols, depth]}. * * ksizes: The size of the sliding window for each dimension of {@code input}. * * strides: 1-D of length 5. How far the centers of two consecutive patches are in * {@code input}. Must be: {@code [1, stride_planes, stride_rows, stride_cols, 1]}. * * padding: The type of padding algorithm to use. * * We specify the size-related attributes as: * *
{@code python
 *        ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
 *        strides = [1, stride_planes, strides_rows, strides_cols, 1]
 *  }
 *
 *  Returns:
 *  * {@code Output}: 5-D Tensor with shape {@code [batch, out_planes, out_rows, out_cols,
 *  ksize_planes * ksize_rows * ksize_cols * depth]} containing patches
 *  with size {@code ksize_planes x ksize_rows x ksize_cols x depth} vectorized
 *  in the "depth" dimension. Note {@code out_planes}, {@code out_rows} and {@code out_cols}
 *  are the dimensions of the output patches. */
@Namespace("tensorflow::ops") @NoOffset public static class ExtractVolumePatches extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ExtractVolumePatches(Pointer p) { super(p); }

    public ExtractVolumePatches(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksizes, strides, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @StringPiece BytePointer padding);
    public ExtractVolumePatches(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksizes, strides, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @StringPiece String padding);
    public ExtractVolumePatches(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksizes, strides, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @StringPiece BytePointer padding);
    public ExtractVolumePatches(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksizes, strides, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksizes, @ArraySlice IntPointer strides, @StringPiece String padding);
    public ExtractVolumePatches(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksizes, strides, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksizes, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding);
    public ExtractVolumePatches(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksizes, strides, padding); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksizes, @ArraySlice int[] strides, @StringPiece String padding);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native ExtractVolumePatches operation(Operation operation);
    public native @ByRef Output patches(); public native ExtractVolumePatches patches(Output patches);
}
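
// Illustrative usage sketch (not generated): the 3-D analogue of the image-patch
// example, with a 2x2x2 window at stride 1. Helper name and values are ours.
public static Output extractVolumePatchesExample(Scope scope, Input input) {
    int[] ksizes  = {1, 2, 2, 2, 1}; // [1, ksize_planes, ksize_rows, ksize_cols, 1]
    int[] strides = {1, 1, 1, 1, 1};
    ExtractVolumePatches op = new ExtractVolumePatches(scope, input, ksizes, strides, "VALID");
    return op.patches();
}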
/** Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
 *
 *  Attributes {@code [min; max]} define the clamping range for the {@code inputs} data.
 *  {@code inputs} values are quantized into the quantization range ({@code [0; 2^num_bits - 1]}
 *  when {@code narrow_range} is false and {@code [1; 2^num_bits - 1]} when it is true) and
 *  then de-quantized and output as floats in {@code [min; max]} interval.
 *  {@code num_bits} is the bitwidth of the quantization; between 2 and 16, inclusive.
 *
 *  Quantization is called fake since the output is still in floating point.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The outputs tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class FakeQuantWithMinMaxArgs extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FakeQuantWithMinMaxArgs(Pointer p) { super(p); }

    /** Optional attribute setters for FakeQuantWithMinMaxArgs */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to -6 */
        public native @ByVal Attrs Min(float x);
        /** Defaults to 6 */
        public native @ByVal Attrs Max(float x);
        /** Defaults to 8 */
        public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
        /** Defaults to false */
        public native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

        public native float min_(); public native Attrs min_(float min_);
        public native float max_(); public native Attrs max_(float max_);
        public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_);
        public native @Cast("bool") boolean narrow_range_(); public native Attrs narrow_range_(boolean narrow_range_);
    }
    public FakeQuantWithMinMaxArgs(@Const @ByRef Scope scope, @ByVal Input inputs) { super((Pointer)null); allocate(scope, inputs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs);
    public FakeQuantWithMinMaxArgs(@Const @ByRef Scope scope, @ByVal Input inputs, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, inputs, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Min(float x);
    public static native @ByVal Attrs Max(float x);
    public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native FakeQuantWithMinMaxArgs operation(Operation operation);
    public native @ByRef Output outputs(); public native FakeQuantWithMinMaxArgs outputs(Output outputs);
}
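
// Illustrative usage sketch (not generated): fake quantization to a symmetric
// [-3, 3] range, chaining the Attrs setters declared above. The values are ours.
public static Output fakeQuantExample(Scope scope, Input inputs) {
    FakeQuantWithMinMaxArgs.Attrs attrs = FakeQuantWithMinMaxArgs.Min(-3.0f).Max(3.0f).NumBits(8);
    FakeQuantWithMinMaxArgs fq = new FakeQuantWithMinMaxArgs(scope, inputs, attrs);
    return fq.outputs();
}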
/** Compute gradients for a FakeQuantWithMinMaxArgs operation.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
 *  * inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
 *
 *  Returns:
 *  * {@code Output}: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
 *  {@code gradients * (inputs >= min && inputs <= max)}. */
@Namespace("tensorflow::ops") @NoOffset public static class FakeQuantWithMinMaxArgsGradient extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FakeQuantWithMinMaxArgsGradient(Pointer p) { super(p); }

    /** Optional attribute setters for FakeQuantWithMinMaxArgsGradient */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to -6 */
        public native @ByVal Attrs Min(float x);
        /** Defaults to 6 */
        public native @ByVal Attrs Max(float x);
        /** Defaults to 8 */
        public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
        /** Defaults to false */
        public native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

        public native float min_(); public native Attrs min_(float min_);
        public native float max_(); public native Attrs max_(float max_);
        public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_);
        public native @Cast("bool") boolean narrow_range_(); public native Attrs narrow_range_(boolean narrow_range_);
    }
    public FakeQuantWithMinMaxArgsGradient(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs) { super((Pointer)null); allocate(scope, gradients, inputs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs);
    public FakeQuantWithMinMaxArgsGradient(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, gradients, inputs, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Min(float x);
    public static native @ByVal Attrs Max(float x);
    public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native FakeQuantWithMinMaxArgsGradient operation(Operation operation);
    public native @ByRef Output backprops(); public native FakeQuantWithMinMaxArgsGradient backprops(Output backprops);
}
/** Fake-quantize the 'inputs' tensor of type float via global float scalars
 *  {@code min} and {@code max} to 'outputs' tensor of same shape as {@code inputs}.
 *
 *  {@code [min; max]} define the clamping range for the {@code inputs} data.
 *  {@code inputs} values are quantized into the quantization range ({@code [0; 2^num_bits - 1]}
 *  when {@code narrow_range} is false and {@code [1; 2^num_bits - 1]} when it is true) and
 *  then de-quantized and output as floats in {@code [min; max]} interval.
 *  {@code num_bits} is the bitwidth of the quantization; between 2 and 16, inclusive.
 *
 *  This operation has a gradient and thus allows for training {@code min} and {@code max}
 *  values.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The outputs tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class FakeQuantWithMinMaxVars extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FakeQuantWithMinMaxVars(Pointer p) { super(p); }

    /** Optional attribute setters for FakeQuantWithMinMaxVars */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to 8 */
        public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
        /** Defaults to false */
        public native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

        public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_);
        public native @Cast("bool") boolean narrow_range_(); public native Attrs narrow_range_(boolean narrow_range_);
    }
    public FakeQuantWithMinMaxVars(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max) { super((Pointer)null); allocate(scope, inputs, min, max); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max);
    public FakeQuantWithMinMaxVars(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, inputs, min, max, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native FakeQuantWithMinMaxVars operation(Operation operation);
    public native @ByRef Output outputs(); public native FakeQuantWithMinMaxVars outputs(Output outputs);
}
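
// Illustrative usage sketch (not generated): here min/max arrive as graph tensors
// (e.g. trainable variables), unlike the compile-time attributes of
// FakeQuantWithMinMaxArgs. The helper name is ours.
public static Output fakeQuantVarsExample(Scope scope, Input inputs, Input min, Input max) {
    FakeQuantWithMinMaxVars fq = new FakeQuantWithMinMaxVars(scope, inputs, min, max);
    return fq.outputs();
}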
/** Compute gradients for a FakeQuantWithMinMaxVars operation.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
 *  * inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
 *  min, max: Quantization interval, scalar floats.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * num_bits: The bitwidth of the quantization; between 2 and 8, inclusive.
 *  * narrow_range: Whether to quantize into 2^num_bits - 1 distinct values.
 *
 *  Returns:
 *  * {@code Output} backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
 *  {@code gradients * (inputs >= min && inputs <= max)}.
 *  * {@code Output} backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
 *  {@code sum(gradients * (inputs < min))}.
 *  * {@code Output} backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
 *  {@code sum(gradients * (inputs > max))}. */
@Namespace("tensorflow::ops") @NoOffset public static class FakeQuantWithMinMaxVarsGradient extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FakeQuantWithMinMaxVarsGradient(Pointer p) { super(p); }

    /** Optional attribute setters for FakeQuantWithMinMaxVarsGradient */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** The bitwidth of the quantization; between 2 and 8, inclusive.
         *
         *  Defaults to 8 */
        public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
        /** Whether to quantize into 2^num_bits - 1 distinct values.
         *
         *  Defaults to false */
        public native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

        public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_);
        public native @Cast("bool") boolean narrow_range_(); public native Attrs narrow_range_(boolean narrow_range_);
    }
    public FakeQuantWithMinMaxVarsGradient(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max) { super((Pointer)null); allocate(scope, gradients, inputs, min, max); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max);
    public FakeQuantWithMinMaxVarsGradient(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, gradients, inputs, min, max, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs);
    public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs NarrowRange(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native FakeQuantWithMinMaxVarsGradient operation(Operation operation);
    public native @ByRef Output backprops_wrt_input(); public native FakeQuantWithMinMaxVarsGradient backprops_wrt_input(Output backprops_wrt_input);
    public native @ByRef Output backprop_wrt_min(); public native FakeQuantWithMinMaxVarsGradient backprop_wrt_min(Output backprop_wrt_min);
    public native @ByRef Output backprop_wrt_max(); public native FakeQuantWithMinMaxVarsGradient backprop_wrt_max(Output backprop_wrt_max);
}
tensor of same shape as {@code inputs}. * * {@code [min; max]} define the clamping range for the {@code inputs} data. * {@code inputs} values are quantized into the quantization range ({@code [0; 2^num_bits - 1]} * when {@code narrow_range} is false and {@code [1; 2^num_bits - 1]} when it is true) and * then de-quantized and output as floats in {@code [min; max]} interval. * {@code num_bits} is the bitwidth of the quantization; between 2 and 16, inclusive. * * This operation has a gradient and thus allows for training {@code min} and {@code max} * values. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The outputs tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class FakeQuantWithMinMaxVarsPerChannel extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FakeQuantWithMinMaxVarsPerChannel(Pointer p) { super(p); } /** Optional attribute setters for FakeQuantWithMinMaxVarsPerChannel */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 8 */ public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x); /** Defaults to false */ public native @ByVal Attrs NarrowRange(@Cast("bool") boolean x); public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_); public native @Cast("bool") boolean narrow_range_(); public native Attrs narrow_range_(boolean narrow_range_); } public FakeQuantWithMinMaxVarsPerChannel(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max) { super((Pointer)null); allocate(scope, inputs, min, max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max); public FakeQuantWithMinMaxVarsPerChannel(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, inputs, min, max, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs NarrowRange(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FakeQuantWithMinMaxVarsPerChannel operation(Operation operation); public native @ByRef Output outputs(); public native FakeQuantWithMinMaxVarsPerChannel outputs(Output outputs); } /** Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. * * Arguments: * * scope: A Scope object * * gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, * shape one of: {@code [d]}, {@code [b, d]}, {@code [b, h, w, d]}. 
* * inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape * same as {@code gradients}. * min, max: Quantization interval, floats of shape {@code [d]}. * * Optional attributes (see {@code Attrs}): * * num_bits: The bitwidth of the quantization; between 2 and 16, inclusive. * * narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. * * Returns: * * {@code Output} backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as * {@code inputs}: * {@code gradients * (inputs >= min && inputs <= max)}. * * {@code Output} backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape {@code [d]}: * {@code sum_per_d(gradients * (inputs < min))}. * * {@code Output} backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape {@code [d]}: * {@code sum_per_d(gradients * (inputs > max))}. */ @Namespace("tensorflow::ops") @NoOffset public static class FakeQuantWithMinMaxVarsPerChannelGradient extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FakeQuantWithMinMaxVarsPerChannelGradient(Pointer p) { super(p); } /** Optional attribute setters for FakeQuantWithMinMaxVarsPerChannelGradient */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The bitwidth of the quantization; between 2 and 16, inclusive. * * Defaults to 8 */ /// public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x); /** Whether to quantize into 2^num_bits - 1 distinct values. 
* * Defaults to false */ public native @ByVal Attrs NarrowRange(@Cast("bool") boolean x); public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_); public native @Cast("bool") boolean narrow_range_(); public native Attrs narrow_range_(boolean narrow_range_); } public FakeQuantWithMinMaxVarsPerChannelGradient(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max) { super((Pointer)null); allocate(scope, gradients, inputs, min, max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max); public FakeQuantWithMinMaxVarsPerChannelGradient(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, gradients, inputs, min, max, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input gradients, @ByVal Input inputs, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs); public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs NarrowRange(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FakeQuantWithMinMaxVarsPerChannelGradient operation(Operation operation); public native @ByRef Output backprops_wrt_input(); public native FakeQuantWithMinMaxVarsPerChannelGradient backprops_wrt_input(Output backprops_wrt_input); public native @ByRef Output backprop_wrt_min(); public native FakeQuantWithMinMaxVarsPerChannelGradient backprop_wrt_min(Output backprop_wrt_min); public native @ByRef Output backprop_wrt_max(); public native FakeQuantWithMinMaxVarsPerChannelGradient backprop_wrt_max(Output backprop_wrt_max); } /** Creates a tensor filled with a scalar value. * * This operation creates a tensor of shape {@code dims} and fills it with {@code value}. * * For example: * *
{@code
 *  # Output tensor has shape [2, 3].
 *  fill([2, 3], 9) ==> [[9, 9, 9]
 *                       [9, 9, 9]]
 *  }
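 * 
 *  The same fill, built through these Java bindings (an editorial sketch, not
 *  generated text; assumes {@code Input}s {@code dims} and {@code value} holding
 *  [2, 3] and 9 were created elsewhere, and that {@code Scope.NewRootScope()} is
 *  available as in the C++ API):
 * 
 *  {@code
 *  Scope scope = Scope.NewRootScope();
 *  Fill fill = new Fill(scope, dims, value);
 *  Output result = fill.asOutput();   // rank-2 tensor of nines
 *  }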
* * {@code tf.fill} differs from {@code tf.constant} in a few ways: * * * {@code tf.fill} only supports scalar contents, whereas {@code tf.constant} supports * Tensor values. * * {@code tf.fill} creates an Op in the computation graph that constructs the actual * Tensor value at runtime. This is in contrast to {@code tf.constant} which embeds * the entire Tensor into the graph with a {@code Const} node. * * Because {@code tf.fill} evaluates at graph runtime, it supports dynamic shapes * based on other runtime Tensors, unlike {@code tf.constant}. * * Arguments: * * scope: A Scope object * * dims: 1-D. Represents the shape of the output tensor. * * value: 0-D (scalar). Value to fill the returned tensor. * * \compatibility(numpy) * Equivalent to np.full * \end_compatibility * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Fill extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Fill(Pointer p) { super(p); } public Fill(@Const @ByRef Scope scope, @ByVal Input dims, @ByVal Input value) { super((Pointer)null); allocate(scope, dims, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input dims, @ByVal Input value); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Fill operation(Operation operation); public native @ByRef Output output(); public native Fill output(Output output); } /** Gather slices from {@code params} according to {@code indices}. * * {@code indices} must be an integer tensor of any dimension (usually 0-D or 1-D). * Produces an output tensor with shape {@code indices.shape + params.shape[1:]} where: * *
{@code python
 *      # Scalar indices
 *      output[:, ..., :] = params[indices, :, ... :]
 * 
 *      # Vector indices
 *      output[i, :, ..., :] = params[indices[i], :, ... :]
 * 
 *      # Higher rank indices
 *      output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
 *  }
 * 
 *  If {@code indices} is a permutation and {@code len(indices) == params.shape[0]} then
 *  this operation will permute {@code params} accordingly.
 * 
 *  {@code validate_indices}: DEPRECATED. If this operation is assigned to CPU, values in
 *  {@code indices} are always validated to be within range. If assigned to GPU,
 *  out-of-bound indices result in safe but unspecified behavior, which may include
 *  raising an error.
 * 
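 *  A minimal construction sketch (editorial illustration, not generated text;
 *  {@code params} and {@code indices} are assumed {@code Input}s built elsewhere,
 *  {@code scope} a valid {@code Scope}):
 * 
 *  {@code
 *  Gather gather = new Gather(scope, params, indices);
 *  // Attrs variant, using the generated static helper:
 *  Gather unchecked = new Gather(scope, params, indices, Gather.ValidateIndices(false));
 *  Output rows = gather.output();
 *  }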
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Gather extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Gather(Pointer p) { super(p); }

  /** Optional attribute setters for Gather */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
      /** Defaults to true */
      public native @ByVal Attrs ValidateIndices(@Cast("bool") boolean x);
  
      public native @Cast("bool") boolean validate_indices_(); public native Attrs validate_indices_(boolean validate_indices_);
  }
  public Gather(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices) { super((Pointer)null); allocate(scope, params, indices); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices);
  public Gather(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, params, indices, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices, @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs ValidateIndices(@Cast("bool") boolean x);

  public native @ByRef Operation operation(); public native Gather operation(Operation operation);
  public native @ByRef Output output(); public native Gather output(Output output);
}

/** Gather slices from {@code params} into a Tensor with shape specified by {@code indices}.
 * 
 *  {@code indices} is a K-dimensional integer tensor, best thought of as a
 *  (K-1)-dimensional tensor of indices into {@code params}, where each element defines a
 *  slice of {@code params}:
 * 
 *      output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
 * 
 *  Whereas in {@code tf.gather} {@code indices} defines slices into the first
 *  dimension of {@code params}, in {@code tf.gather_nd}, {@code indices} defines slices into the
 *  first {@code N} dimensions of {@code params}, where {@code N = indices.shape[-1]}.
 * 
 *  The last dimension of {@code indices} can be at most the rank of
 *  {@code params}:
 * 
 *      indices.shape[-1] <= params.rank
 * 
 *  The last dimension of {@code indices} corresponds to elements
 *  (if {@code indices.shape[-1] == params.rank}) or slices
 *  (if {@code indices.shape[-1] < params.rank}) along dimension {@code indices.shape[-1]}
 *  of {@code params}. The output tensor has shape
 * 
 *      indices.shape[:-1] + params.shape[indices.shape[-1]:]
 * 
 *  Note that on CPU, if an out of bound index is found, an error is returned.
 *  On GPU, if an out of bound index is found, a 0 is stored in the
 *  corresponding output value.
 * 
 *  Some examples below.
 * 
 *  Simple indexing into a matrix:
 * 
{@code python
 *      indices = [[0, 0], [1, 1]]
 *      params = [['a', 'b'], ['c', 'd']]
 *      output = ['a', 'd']
 *  }
* * Slice indexing into a matrix: * *
{@code python
 *      indices = [[1], [0]]
 *      params = [['a', 'b'], ['c', 'd']]
 *      output = [['c', 'd'], ['a', 'b']]
 *  }
* * Indexing into a 3-tensor: * *
{@code python
 *      indices = [[1]]
 *      params = [[['a0', 'b0'], ['c0', 'd0']],
 *                [['a1', 'b1'], ['c1', 'd1']]]
 *      output = [[['a1', 'b1'], ['c1', 'd1']]]
 * 
 * 
 *      indices = [[0, 1], [1, 0]]
 *      params = [[['a0', 'b0'], ['c0', 'd0']],
 *                [['a1', 'b1'], ['c1', 'd1']]]
 *      output = [['c0', 'd0'], ['a1', 'b1']]
 * 
 * 
 *      indices = [[0, 0, 1], [1, 0, 1]]
 *      params = [[['a0', 'b0'], ['c0', 'd0']],
 *                [['a1', 'b1'], ['c1', 'd1']]]
 *      output = ['b0', 'b1']
 *  }
* * Batched indexing into a matrix: * *
{@code python
 *      indices = [[[0, 0]], [[0, 1]]]
 *      params = [['a', 'b'], ['c', 'd']]
 *      output = [['a'], ['b']]
 *  }
* * Batched slice indexing into a matrix: * *
{@code python
 *      indices = [[[1]], [[0]]]
 *      params = [['a', 'b'], ['c', 'd']]
 *      output = [[['c', 'd']], [['a', 'b']]]
 *  }
* * Batched indexing into a 3-tensor: * *
{@code python
 *      indices = [[[1]], [[0]]]
 *      params = [[['a0', 'b0'], ['c0', 'd0']],
 *                [['a1', 'b1'], ['c1', 'd1']]]
 *      output = [[[['a1', 'b1'], ['c1', 'd1']]],
 *                [[['a0', 'b0'], ['c0', 'd0']]]]
 * 
 *      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
 *      params = [[['a0', 'b0'], ['c0', 'd0']],
 *                [['a1', 'b1'], ['c1', 'd1']]]
 *      output = [[['c0', 'd0'], ['a1', 'b1']],
 *                [['a0', 'b0'], ['c1', 'd1']]]
 * 
 * 
 *      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
 *      params = [[['a0', 'b0'], ['c0', 'd0']],
 *                [['a1', 'b1'], ['c1', 'd1']]]
 *      output = [['b0', 'b1'], ['d0', 'c1']]
 *  }
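 * 
 *  The "simple indexing" case above, driven from these bindings (editorial
 *  sketch; {@code params} and {@code indices} are assumed {@code Input}s holding
 *  the values shown, {@code scope} a valid {@code Scope}):
 * 
 *  {@code
 *  GatherNd gnd = new GatherNd(scope, params, indices);
 *  Output out = gnd.output();   // ['a', 'd'] for indices = [[0, 0], [1, 1]]
 *  }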
* * See also {@code tf.gather} and {@code tf.batch_gather}. * * Arguments: * * scope: A Scope object * * params: The tensor from which to gather values. * * indices: Index tensor. * * Returns: * * {@code Output}: Values from {@code params} gathered from indices given by {@code indices}, with * shape {@code indices.shape[:-1] + params.shape[indices.shape[-1]:]}. */ @Namespace("tensorflow::ops") @NoOffset public static class GatherNd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GatherNd(Pointer p) { super(p); } public GatherNd(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices) { super((Pointer)null); allocate(scope, params, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native GatherNd operation(Operation operation); public native @ByRef Output output(); public native GatherNd output(Output output); } /** Gather slices from {@code params} axis {@code axis} according to {@code indices}. * * {@code indices} must be an integer tensor of any dimension (usually 0-D or 1-D). * Produces an output tensor with shape {@code params.shape[:axis] + indices.shape + * params.shape[axis + 1:]} where: * *
{@code python
 *      # Scalar indices (output is rank(params) - 1).
 *      output[a_0, ..., a_n, b_0, ..., b_n] =
 *        params[a_0, ..., a_n, indices, b_0, ..., b_n]
 * 
 *      # Vector indices (output is rank(params)).
 *      output[a_0, ..., a_n, i, b_0, ..., b_n] =
 *        params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
 * 
 *      # Higher rank indices (output is rank(params) + rank(indices) - 1).
 *      output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
 *        params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
 *  }
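 * 
 *  Unlike {@code Gather}, the axis is itself a scalar {@code Input}, not an
 *  attribute. An editorial sketch (assumes {@code params}, {@code indices} and a
 *  scalar {@code axis} built elsewhere):
 * 
 *  {@code
 *  GatherV2 gathered = new GatherV2(scope, params, indices, axis);
 *  Output out = gathered.output();
 *  }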
* * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. * * See also {@code tf.batch_gather} and {@code tf.gather_nd}. * * Arguments: * * scope: A Scope object * * params: The tensor from which to gather values. Must be at least rank * {@code axis + 1}. * * indices: Index tensor. Must be in range {@code [0, params.shape[axis])}. * * axis: The axis in {@code params} to gather {@code indices} from. Defaults to the first * dimension. Supports negative indexes. * * Returns: * * {@code Output}: Values from {@code params} gathered from indices given by {@code indices}, with * shape {@code params.shape[:axis] + indices.shape + params.shape[axis + 1:]}. */ @Namespace("tensorflow::ops") @NoOffset public static class GatherV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GatherV2(Pointer p) { super(p); } public GatherV2(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices, @ByVal Input axis) { super((Pointer)null); allocate(scope, params, indices, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input params, @ByVal Input indices, @ByVal Input axis); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native GatherV2 operation(Operation operation); public native @ByRef Output output(); public native GatherV2 output(Output output); } /** Gives a guarantee to the TF runtime that the input tensor is a constant. * * The runtime is then free to make optimizations based on this. * * Only accepts value typed tensors as inputs and rejects resource variable handles * as input. * * Returns the input tensor without modification. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class GuaranteeConst extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GuaranteeConst(Pointer p) { super(p); } public GuaranteeConst(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native GuaranteeConst operation(Operation operation); public native @ByRef Output output(); public native GuaranteeConst output(Output output); } /** Return a tensor with the same shape and contents as the input tensor or value. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Identity extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Identity(Pointer p) { super(p); } public Identity(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Identity operation(Operation operation); public native @ByRef Output output(); public native Identity output(Output output); } /** Returns a list of tensors with the same shapes and contents as the input * * tensors. * * This op can be used to override the gradient for complicated functions. For * example, suppose y = f(x) and we wish to apply a custom function g for backprop * such that dx = g(dy). In Python, * *
{@code python
 *  with tf.get_default_graph().gradient_override_map(
 *      {'IdentityN': 'OverrideGradientWithG'}):
 *    y, _ = identity_n([f(x), x])
 * 
 *  @tf.RegisterGradient('OverrideGradientWithG')
 *  def ApplyG(op, dy, _):
 *    return [None, g(dy)]  # Do not backprop to f(x).
 *  }
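 * 
 *  Constructing the op itself from these bindings (editorial sketch;
 *  {@code inputs} is an assumed {@code InputList}):
 * 
 *  {@code
 *  IdentityN idn = new IdentityN(scope, inputs);
 *  Output first = idn.get(0);   // generated operator[] accessor
 *  }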
* * Arguments: * * scope: A Scope object * * Returns: * * {@code OutputList}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class IdentityN extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IdentityN(Pointer p) { super(p); } public IdentityN(@Const @ByRef Scope scope, @ByVal InputList input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList input); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public native @ByRef Operation operation(); public native IdentityN operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output(); public native IdentityN output(OutputVector output); } /** Returns immutable tensor from memory region. * * The current implementation memmaps the tensor from a file. * * Arguments: * * scope: A Scope object * * dtype: Type of the returned tensor. * * shape: Shape of the returned tensor. * * memory_region_name: Name of readonly memory region used by the tensor, see * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. * * Returns: * * {@code Output}: The tensor tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ImmutableConst extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ImmutableConst(Pointer p) { super(p); } public ImmutableConst(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @StringPiece BytePointer memory_region_name) { super((Pointer)null); allocate(scope, dtype, shape, memory_region_name); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @StringPiece BytePointer memory_region_name); public ImmutableConst(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @StringPiece String memory_region_name) { super((Pointer)null); allocate(scope, dtype, shape, memory_region_name); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @StringPiece String memory_region_name); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ImmutableConst operation(Operation operation); public native @ByRef Output tensor(); public native ImmutableConst tensor(Output tensor); } /** Adds v into specified rows of x. * * Computes y = x; y[i, :] += v; return y. * * Arguments: * * scope: A Scope object * * x: A {@code Tensor} of type T. * * i: A vector. Indices into the left-most dimension of {@code x}. * * v: A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. * * Returns: * * {@code Output}: A {@code Tensor} of type T. An alias of {@code x}. The content of {@code y} is undefined if there are duplicates in {@code i}. */ @Namespace("tensorflow::ops") @NoOffset public static class InplaceAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public InplaceAdd(Pointer p) { super(p); } public InplaceAdd(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input i, @ByVal Input v) { super((Pointer)null); allocate(scope, x, i, v); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input i, @ByVal Input v); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native InplaceAdd operation(Operation operation); public native @ByRef Output y(); public native InplaceAdd y(Output y); } /** Subtracts {@code v} into specified rows of {@code x}. * * Computes y = x; y[i, :] -= v; return y. * * Arguments: * * scope: A Scope object * * x: A {@code Tensor} of type T. * * i: A vector. Indices into the left-most dimension of {@code x}. * * v: A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. * * Returns: * * {@code Output}: A {@code Tensor} of type T. An alias of {@code x}. The content of {@code y} is undefined if there are duplicates in {@code i}. */ @Namespace("tensorflow::ops") @NoOffset public static class InplaceSub extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InplaceSub(Pointer p) { super(p); } public InplaceSub(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input i, @ByVal Input v) { super((Pointer)null); allocate(scope, x, i, v); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input i, @ByVal Input v); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native InplaceSub operation(Operation operation); public native @ByRef Output y(); public native InplaceSub y(Output y); } /** Updates specified rows with values in {@code v}. * * Computes {@code x[i, :] = v; return x}. * * Arguments: * * scope: A Scope object * * x: A tensor of type {@code T}. * * i: A vector. Indices into the left-most dimension of {@code x}. * * v: A {@code Tensor} of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. * * Returns: * * {@code Output}: A {@code Tensor} of type T. An alias of {@code x}. The content of {@code y} is undefined if there are duplicates in {@code i}. */ @Namespace("tensorflow::ops") @NoOffset public static class InplaceUpdate extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InplaceUpdate(Pointer p) { super(p); } public InplaceUpdate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input i, @ByVal Input v) { super((Pointer)null); allocate(scope, x, i, v); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input i, @ByVal Input v); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native InplaceUpdate operation(Operation operation); public native @ByRef Output y(); public native InplaceUpdate y(Output y); } /** Computes the inverse permutation of a tensor. * * This operation computes the inverse of an index permutation. 
It takes a 1-D * integer tensor {@code x}, which represents the indices of a zero-based array, and * swaps each value with its index position. In other words, for an output tensor * {@code y} and an input tensor {@code x}, this operation computes the following: * * {@code y[x[i]] = i for i in [0, 1, ..., len(x) - 1]} * * The values must include 0. There can be no duplicate values or negative values. * * For example: * *
{@code
 *  # tensor `x` is [3, 4, 0, 2, 1]
 *  invert_permutation(x) ==> [2, 4, 3, 0, 1]
 *  }
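 * 
 *  For instance (editorial sketch; {@code x} is an assumed 1-D integer
 *  {@code Input} holding [3, 4, 0, 2, 1] as above):
 * 
 *  {@code
 *  InvertPermutation inv = new InvertPermutation(scope, x);
 *  Output y = inv.y();   // [2, 4, 3, 0, 1]
 *  }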
* * Arguments: * * scope: A Scope object * * x: 1-D. * * Returns: * * {@code Output}: 1-D. */ @Namespace("tensorflow::ops") @NoOffset public static class InvertPermutation extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InvertPermutation(Pointer p) { super(p); } public InvertPermutation(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native InvertPermutation operation(Operation operation); public native @ByRef Output y(); public native InvertPermutation y(Output y); } /** Computes the difference between two lists of numbers or strings. * * Given a list {@code x} and a list {@code y}, this operation returns a list {@code out} that * represents all values that are in {@code x} but not in {@code y}. The returned list {@code out} * is sorted in the same order that the numbers appear in {@code x} (duplicates are * preserved). This operation also returns a list {@code idx} that represents the * position of each {@code out} element in {@code x}. In other words: * * {@code out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]} * * For example, given this input: * *
{@code
 *  x = [1, 2, 3, 4, 5, 6]
 *  y = [1, 3, 5]
 *  }
* * This operation would return: * *
{@code
 *  out ==> [2, 4, 6]
 *  idx ==> [1, 3, 5]
 *  }
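 * 
 *  The same computation through these bindings, also selecting 64-bit indices via
 *  the generated {@code OutIdx} attribute (editorial sketch; {@code x} and
 *  {@code y} are assumed {@code Input}s):
 * 
 *  {@code
 *  SetDiff1D diff = new SetDiff1D(scope, x, y, SetDiff1D.OutIdx(DT_INT64));
 *  Output out = diff.out();   // values kept from x
 *  Output idx = diff.idx();   // their positions in x
 *  }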
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: 1-D. Values to keep.
 *  * y: 1-D. Values to remove.
 * 
 *  Returns:
 *  * {@code Output} out: 1-D. Values present in {@code x} but not in {@code y}.
 *  * {@code Output} idx: 1-D. Positions of {@code x} values preserved in {@code out}. */
@Namespace("tensorflow::ops") @NoOffset public static class SetDiff1D extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SetDiff1D(Pointer p) { super(p); }

  /** Optional attribute setters for SetDiff1D */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
      /** Defaults to DT_INT32 */
      public native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);
  
      public native @Cast("tensorflow::DataType") int out_idx_(); public native Attrs out_idx_(int out_idx_);
  }
  public SetDiff1D(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
  public SetDiff1D(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, y, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native SetDiff1D operation(Operation operation);
  public native @ByRef Output out(); public native SetDiff1D out(Output out);
  public native @ByRef Output idx(); public native SetDiff1D idx(Output idx);
}

/** Copy a tensor setting everything outside a central band in each innermost matrix
 * 
 *  to zero.
 * 
 *  The {@code band} part is computed as follows:
 *  Assume {@code input} has {@code k} dimensions {@code [I, J, K, ..., M, N]}, then the output is a
 *  tensor with the same shape where
 * 
 *  {@code band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]}.
 * 
 *  The indicator function
 * 
 *  {@code in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
 *                         (num_upper < 0 || (n-m) <= num_upper)}.
 * 
 *  For example:
 * 
{@code
 *  # if 'input' is [[ 0,  1,  2, 3]
 *                   [-1,  0,  1, 2]
 *                   [-2, -1,  0, 1]
 *                   [-3, -2, -1, 0]],
 * 
 *  tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
 *                                         [-1,  0,  1, 2]
 *                                         [ 0, -1,  0, 1]
 *                                         [ 0,  0, -1, 0]],
 * 
 *  tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
 *                                        [-1,  0,  1, 0]
 *                                        [-2, -1,  0, 1]
 *                                        [ 0, -2, -1, 0]]
 *  }
* * Useful special cases: * *
{@code
 *   tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
 *   tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
 *   tf.matrix_band_part(input, 0, 0) ==> Diagonal.
 *  }
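 * 
 *  An editorial construction sketch ({@code input}, {@code num_lower} and
 *  {@code num_upper} are assumed {@code Input}s; scalar values 0 and -1 select the
 *  special cases above):
 * 
 *  {@code
 *  MatrixBandPart banded = new MatrixBandPart(scope, input, num_lower, num_upper);
 *  Output band = banded.band();
 *  }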
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: Rank {@code k} tensor.
 *  * num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
 *  lower triangle.
 *  * num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
 *  entire upper triangle.
 * 
 *  Returns:
 *  * {@code Output}: Rank {@code k} tensor of the same shape as input. The extracted banded tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class MatrixBandPart extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MatrixBandPart(Pointer p) { super(p); }

  public MatrixBandPart(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input num_lower, @ByVal Input num_upper) { super((Pointer)null); allocate(scope, input, num_lower, num_upper); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input num_lower, @ByVal Input num_upper);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native MatrixBandPart operation(Operation operation);
  public native @ByRef Output band(); public native MatrixBandPart band(Output band);
}

/** Returns a batched diagonal tensor with given batched diagonal values.
 * 
 *  Given a {@code diagonal}, this operation returns a tensor with the {@code diagonal} and
 *  everything else padded with zeros. The diagonal is computed as follows:
 * 
 *  Assume {@code diagonal} has {@code k} dimensions {@code [I, J, K, ..., N]}, then the output is a
 *  tensor of rank {@code k+1} with dimensions {@code [I, J, K, ..., N, N]} where:
 * 
 *  {@code output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]}.
 * 
 *  For example:
 * 
 *  {@code
 *  # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
 * 
 *  and diagonal.shape = (2, 4)
 * 
 *  tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
 *                                 [0, 2, 0, 0]
 *                                 [0, 0, 3, 0]
 *                                 [0, 0, 0, 4]],
 *                                [[5, 0, 0, 0]
 *                                 [0, 6, 0, 0]
 *                                 [0, 0, 7, 0]
 *                                 [0, 0, 0, 8]]]
 * 
 *  which has shape (2, 4, 4)
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * diagonal: Rank {@code k}, where {@code k >= 1}.
 * 
 *  Returns:
 *  * {@code Output}: Rank {@code k+1}, with {@code output.shape = diagonal.shape + [diagonal.shape[-1]]}. */
@Namespace("tensorflow::ops") @NoOffset public static class MatrixDiag extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MatrixDiag(Pointer p) { super(p); }

  public MatrixDiag(@Const @ByRef Scope scope, @ByVal Input diagonal) { super((Pointer)null); allocate(scope, diagonal); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input diagonal);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native MatrixDiag operation(Operation operation);
  public native @ByRef Output output(); public native MatrixDiag output(Output output);
}
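
// Editorial usage sketch (not generated code): MatrixDiag composed with the
// MatrixDiagPart wrapper declared below round-trips a batch of diagonals. The
// generated asInput() conversion chains one op into the next; `diagonal` is an
// assumed pre-built Input, `scope` a valid Scope.
//
//   MatrixDiag md = new MatrixDiag(scope, diagonal);
//   MatrixDiagPart part = new MatrixDiagPart(scope, md.asInput());
//   Output roundTrip = part.diagonal();   // equals the original diagonal values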

/** Returns the batched diagonal part of a batched tensor.
 * 
 *  This operation returns a tensor with the {@code diagonal} part
 *  of the batched {@code input}. The {@code diagonal} part is computed as follows:
 * 
 *  Assume {@code input} has {@code k} dimensions {@code [I, J, K, ..., M, N]}, then the output is a
 *  tensor of rank {@code k - 1} with dimensions {@code [I, J, K, ..., min(M, N)]} where:
 * 
 *  {@code diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]}.
 * 
 *  The input must be at least a matrix.
 * 
 *  For example:
 * 
 *  
{@code
 *  # 'input' is [[[1, 0, 0, 0]
 *                 [0, 2, 0, 0]
 *                 [0, 0, 3, 0]
 *                 [0, 0, 0, 4]],
 *                [[5, 0, 0, 0]
 *                 [0, 6, 0, 0]
 *                 [0, 0, 7, 0]
 *                 [0, 0, 0, 8]]]
 * 
 *  and input.shape = (2, 4, 4)
 * 
 *  tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
 * 
 *  which has shape (2, 4)
 *  }
* * Arguments: * * scope: A Scope object * * input: Rank {@code k} tensor where {@code k >= 2}. * * Returns: * * {@code Output}: The extracted diagonal(s) having shape * {@code diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]}. */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixDiagPart extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixDiagPart(Pointer p) { super(p); } public MatrixDiagPart(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native MatrixDiagPart operation(Operation operation); public native @ByRef Output diagonal(); public native MatrixDiagPart diagonal(Output diagonal); } /** Returns a batched matrix tensor with new batched diagonal values. * * Given {@code input} and {@code diagonal}, this operation returns a tensor with the * same shape and values as {@code input}, except for the main diagonal of the * innermost matrices. These will be overwritten by the values in {@code diagonal}. * * The output is computed as follows: * * Assume {@code input} has {@code k+1} dimensions {@code [I, J, K, ..., M, N]} and {@code diagonal} has * {@code k} dimensions {@code [I, J, K, ..., min(M, N)]}. Then the output is a * tensor of rank {@code k+1} with dimensions {@code [I, J, K, ..., M, N]} where: * * * {@code output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]} for {@code m == n}. * * {@code output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]} for {@code m != n}. * * Arguments: * * scope: A Scope object * * input: Rank {@code k+1}, where {@code k >= 1}. * * diagonal: Rank {@code k}, where {@code k >= 1}. * * Returns: * * {@code Output}: Rank {@code k+1}, with {@code output.shape = input.shape}. */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixSetDiag extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixSetDiag(Pointer p) { super(p); } public MatrixSetDiag(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input diagonal) { super((Pointer)null); allocate(scope, input, diagonal); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input diagonal); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native MatrixSetDiag operation(Operation operation); public native @ByRef Output output(); public native MatrixSetDiag output(Output output); } /** Pads a tensor with mirrored values. * * This operation pads a {@code input} with mirrored values according to the {@code paddings} * you specify. {@code paddings} is an integer tensor with shape {@code [n, 2]}, where n is * the rank of {@code input}. For each dimension D of {@code input}, {@code paddings[D, 0]} indicates * how many values to add before the contents of {@code input} in that dimension, and * {@code paddings[D, 1]} indicates how many values to add after the contents of {@code input} * in that dimension. 
Both {@code paddings[D, 0]} and {@code paddings[D, 1]} must be no greater * than {@code input.dim_size(D)} (or {@code input.dim_size(D) - 1}) if {@code copy_border} is true * (if false, respectively). * * The padded size of each dimension D of the output is: * * {@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)} * * For example: * *
{@code
 *  # 't' is [[1, 2, 3], [4, 5, 6]].
 *  # 'paddings' is [[1, 1], [2, 2]].
 *  # 'mode' is SYMMETRIC.
 *  # rank of 't' is 2.
 *  pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
 *                        [2, 1, 1, 2, 3, 3, 2]
 *                        [5, 4, 4, 5, 6, 6, 5]
 *                        [5, 4, 4, 5, 6, 6, 5]]
 *  }
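 * 
 *  In these bindings {@code mode} is passed as a plain string (editorial sketch;
 *  {@code input} and {@code paddings} are assumed {@code Input}s):
 * 
 *  {@code
 *  MirrorPad padded = new MirrorPad(scope, input, paddings, "SYMMETRIC");
 *  Output out = padded.output();
 *  }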
* * Arguments: * * scope: A Scope object * * input: The input tensor to be padded. * * paddings: A two-column matrix specifying the padding sizes. The number of * rows must be the same as the rank of {@code input}. * * mode: Either {@code REFLECT} or {@code SYMMETRIC}. In reflect mode the padded regions * do not include the borders, while in symmetric mode the padded regions * do include the borders. For example, if {@code input} is {@code [1, 2, 3]} and {@code paddings} * is {@code [0, 2]}, then the output is {@code [1, 2, 3, 2, 1]} in reflect mode, and * it is {@code [1, 2, 3, 3, 2]} in symmetric mode. * * Returns: * * {@code Output}: The padded tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MirrorPad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MirrorPad(Pointer p) { super(p); } public MirrorPad(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @StringPiece BytePointer mode) { super((Pointer)null); allocate(scope, input, paddings, mode); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @StringPiece BytePointer mode); public MirrorPad(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @StringPiece String mode) { super((Pointer)null); allocate(scope, input, paddings, mode); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @StringPiece String mode); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native MirrorPad operation(Operation operation); public native @ByRef Output output(); public native MirrorPad output(Output output); } /** Returns a one-hot tensor. * * The locations represented by indices in {@code indices} take value {@code on_value}, * while all other locations take value {@code off_value}. * * If the input {@code indices} is rank {@code N}, the output will have rank {@code N+1}, * The new axis is created at dimension {@code axis} (default: the new axis is * appended at the end). * * If {@code indices} is a scalar the output shape will be a vector of length {@code depth}. * * If {@code indices} is a vector of length {@code features}, the output shape will be: *
{@code
 *    features x depth if axis == -1
 *    depth x features if axis == 0
 *  }
* * If {@code indices} is a matrix (batch) with shape {@code [batch, features]}, * the output shape will be: *
{@code
 *    batch x features x depth if axis == -1
 *    batch x depth x features if axis == 1
 *    depth x batch x features if axis == 0
 *  }
* * * Examples * ========= * * Suppose that * *
{@code
 *    indices = [0, 2, -1, 1]
 *    depth = 3
 *    on_value = 5.0
 *    off_value = 0.0
 *    axis = -1
 *  }
* * Then output is {@code [4 x 3]}: * *
{@code output =
 *        [5.0 0.0 0.0]  // one_hot(0)
 *        [0.0 0.0 5.0]  // one_hot(2)
 *        [0.0 0.0 0.0]  // one_hot(-1)
 *        [0.0 5.0 0.0]  // one_hot(1)
 *      }
* * Suppose that * *
{@code
 *    indices = [0, 2, -1, 1]
 *    depth = 3
 *    on_value = 0.0
 *    off_value = 3.0
 *    axis = 0
 *  }
* * Then output is {@code [3 x 4]}: * *
{@code output =
 *        [0.0 3.0 3.0 3.0]
 *        [3.0 3.0 3.0 0.0]
 *        [3.0 3.0 3.0 3.0]
 *        [3.0 0.0 3.0 3.0]
 *      //  ^                one_hot(0)
 *      //      ^            one_hot(2)
 *      //          ^        one_hot(-1)
 *      //              ^    one_hot(1)
 *      }
* Suppose that * *
{@code
 *    indices = [[0, 2], [1, -1]]
 *    depth = 3
 *    on_value = 1.0
 *    off_value = 0.0
 *    axis = -1
 *  }
* * Then output is {@code [2 x 2 x 3]}: * *
{@code output =
 *        [
 *          [1.0, 0.0, 0.0]  // one_hot(0)
 *          [0.0, 0.0, 1.0]  // one_hot(2)
 *        ][
 *          [0.0, 1.0, 0.0]  // one_hot(1)
 *          [0.0, 0.0, 0.0]  // one_hot(-1)
 *        ]}
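 * 
 *  The first example above via these bindings (editorial sketch; {@code indices},
 *  {@code depth}, {@code on_value} and {@code off_value} are assumed {@code Input}s
 *  holding the values shown):
 * 
 *  {@code
 *  OneHot oneHot = new OneHot(scope, indices, depth, on_value, off_value, OneHot.Axis(-1));
 *  Output out = oneHot.output();   // [4 x 3]
 *  }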
* * Arguments: * * scope: A Scope object * * indices: A tensor of indices. * * depth: A scalar defining the depth of the one hot dimension. * * on_value: A scalar defining the value to fill in output when {@code indices[j] = i}. * * off_value: A scalar defining the value to fill in output when {@code indices[j] != i}. * * Optional attributes (see {@code Attrs}): * * axis: The axis to fill (default: -1, a new inner-most axis). * * Returns: * * {@code Output}: The one-hot tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class OneHot extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OneHot(Pointer p) { super(p); } /** Optional attribute setters for OneHot */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The axis to fill (default: -1, a new inner-most axis). * * Defaults to -1 */ public native @ByVal Attrs Axis(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long axis_(); public native Attrs axis_(long axis_); } public OneHot(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input depth, @ByVal Input on_value, @ByVal Input off_value) { super((Pointer)null); allocate(scope, indices, depth, on_value, off_value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input depth, @ByVal Input on_value, @ByVal Input off_value); public OneHot(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input depth, @ByVal Input on_value, @ByVal Input off_value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, indices, depth, on_value, off_value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input depth, @ByVal Input on_value, @ByVal Input off_value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Axis(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native OneHot operation(Operation operation); public native @ByRef Output output(); public native OneHot output(Output output); } /** Returns a tensor of ones with the same shape and type as x. * * Arguments: * * scope: A Scope object * * x: a tensor of type T. * * Returns: * * {@code Output}: a tensor of the same shape and type as x but filled with ones. */ @Namespace("tensorflow::ops") @NoOffset public static class OnesLike extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public OnesLike(Pointer p) { super(p); } public OnesLike(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native OnesLike operation(Operation operation); public native @ByRef Output y(); public native OnesLike y(Output y); } /** Packs a list of {@code N} rank-{@code R} tensors into one rank-{@code (R+1)} tensor. * * Packs the {@code N} tensors in {@code values} into a tensor with rank one higher than each * tensor in {@code values}, by packing them along the {@code axis} dimension. * Given a list of tensors of shape {@code (A, B, C)}; * * if {@code axis == 0} then the {@code output} tensor will have the shape {@code (N, A, B, C)}. * if {@code axis == 1} then the {@code output} tensor will have the shape {@code (A, N, B, C)}. * Etc. * * For example: * *
{@code
 *  # 'x' is [1, 4]
 *  # 'y' is [2, 5]
 *  # 'z' is [3, 6]
 *  pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
 *  pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
 *  }
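 * 
 *  An editorial sketch (assumes an {@code InputList} {@code values} holding x, y
 *  and z was built elsewhere):
 * 
 *  {@code
 *  Stack packed = new Stack(scope, values);                  // axis 0
 *  Stack packed1 = new Stack(scope, values, Stack.Axis(1));  // axis 1
 *  Output out = packed.output();
 *  }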
* * This is the opposite of {@code unpack}. * * Arguments: * * scope: A Scope object * * values: Must be of same shape and type. * * Optional attributes (see {@code Attrs}): * * axis: Dimension along which to pack. Negative values wrap around, so the * valid range is {@code [-(R+1), R+1)}. * * Returns: * * {@code Output}: The packed tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Stack extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Stack(Pointer p) { super(p); } /** Optional attribute setters for Stack */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Dimension along which to pack. Negative values wrap around, so the * valid range is {@code [-(R+1), R+1)}. * * Defaults to 0 */ public native @ByVal Attrs Axis(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long axis_(); public native Attrs axis_(long axis_); } public Stack(@Const @ByRef Scope scope, @ByVal InputList values) { super((Pointer)null); allocate(scope, values); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList values); public Stack(@Const @ByRef Scope scope, @ByVal InputList values, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, values, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList values, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Axis(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native Stack operation(Operation operation); public native @ByRef Output output(); public native Stack output(Output output); } /** Pads a tensor with zeros. * * This operation pads a {@code input} with zeros according to the {@code paddings} you * specify. {@code paddings} is an integer tensor with shape {@code [Dn, 2]}, where n is the * rank of {@code input}. For each dimension D of {@code input}, {@code paddings[D, 0]} indicates * how many zeros to add before the contents of {@code input} in that dimension, and * {@code paddings[D, 1]} indicates how many zeros to add after the contents of {@code input} * in that dimension. * * The padded size of each dimension D of the output is: * * {@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)} * * For example: * *
{@code
 *  # 't' is [[1, 1], [2, 2]]
 *  # 'paddings' is [[1, 1], [2, 2]]
 *  # rank of 't' is 2
 *  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
 *                        [0, 0, 1, 1, 0, 0]
 *                        [0, 0, 2, 2, 0, 0]
 *                        [0, 0, 0, 0, 0, 0]]
 *  }
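 * 
 *  For instance (editorial sketch; {@code t} and {@code paddings} are assumed
 *  {@code Input}s matching the example above):
 * 
 *  {@code
 *  Pad padded = new Pad(scope, t, paddings);
 *  Output out = padded.output();   // zero-padded 4 x 6 tensor
 *  }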
* * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Pad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Pad(Pointer p) { super(p); } public Pad(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings) { super((Pointer)null); allocate(scope, input, paddings); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Pad operation(Operation operation); public native @ByRef Output output(); public native Pad output(Output output); } /** Pads a tensor. * * This operation pads {@code input} according to the {@code paddings} and {@code constant_values} * you specify. {@code paddings} is an integer tensor with shape {@code [Dn, 2]}, where n is * the rank of {@code input}. For each dimension D of {@code input}, {@code paddings[D, 0]} indicates * how many padding values to add before the contents of {@code input} in that dimension, * and {@code paddings[D, 1]} indicates how many padding values to add after the contents * of {@code input} in that dimension. {@code constant_values} is a scalar tensor of the same * type as {@code input} that indicates the value to use for padding {@code input}. * * The padded size of each dimension D of the output is: * * {@code paddings(D, 0) + input.dim_size(D) + paddings(D, 1)} * * For example: * *
{@code
 *  # 't' is [[1, 1], [2, 2]]
 *  # 'paddings' is [[1, 1], [2, 2]]
 *  # 'constant_values' is 0
 *  # rank of 't' is 2
 *  pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
 *                        [0, 0, 1, 1, 0, 0]
 *                        [0, 0, 2, 2, 0, 0]
 *                        [0, 0, 0, 0, 0, 0]]
 *  }
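 * 
 *  The corresponding Java sketch (illustrative only; {@code constant_values} is
 *  a scalar of the same type as the input):
 * 
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder t = new Placeholder(scope, DT_FLOAT);
 *  Placeholder paddings = new Placeholder(scope, DT_INT32);
 *  Placeholder pad_value = new Placeholder(scope, DT_FLOAT);  // scalar fill value
 *  PadV2 padded = new PadV2(scope, t.asInput(), paddings.asInput(),
 *                           pad_value.asInput());
 *  }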
* * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class PadV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PadV2(Pointer p) { super(p); } public PadV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input constant_values) { super((Pointer)null); allocate(scope, input, paddings, constant_values); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input constant_values); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native PadV2 operation(Operation operation); public native @ByRef Output output(); public native PadV2 output(Output output); } /** Concatenates a list of {@code N} tensors along the first dimension. * * The input tensors are all required to have size 1 in the first dimension. * * For example: * *
{@code
 *  # 'x' is [[1, 4]]
 *  # 'y' is [[2, 5]]
 *  # 'z' is [[3, 6]]
 *  parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
 *  }
* * The difference between concat and parallel_concat is that concat requires all * of the inputs be computed before the operation will begin but doesn't require * that the input shapes be known during graph construction. Parallel concat * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. * * Arguments: * * scope: A Scope object * * values: Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. * * shape: the final shape of the result; should be equal to the shapes of any input * but with the number of input values in the first dimension. * * Returns: * * {@code Output}: The concatenated tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ParallelConcat extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParallelConcat(Pointer p) { super(p); } public ParallelConcat(@Const @ByRef Scope scope, @ByVal InputList values, @ByVal PartialTensorShape shape) { super((Pointer)null); allocate(scope, values, shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList values, @ByVal PartialTensorShape shape); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ParallelConcat operation(Operation operation); public native @ByRef Output output(); public native ParallelConcat output(Output output); } /** A placeholder op for a value that will be fed into the computation. * * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. * * Arguments: * * scope: A Scope object * * dtype: The type of elements in the tensor. * * Optional attributes (see {@code Attrs}): * * shape: (Optional) The shape of the tensor. If the shape has 0 dimensions, the * shape is unconstrained. * * Returns: * * {@code Output}: A placeholder tensor that must be replaced using the feed mechanism. */ @Namespace("tensorflow::ops") @NoOffset public static class Placeholder extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Placeholder(Pointer p) { super(p); } /** Optional attribute setters for Placeholder */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** (Optional) The shape of the tensor. If the shape has 0 dimensions, the * shape is unconstrained. 
* * Defaults to */ public native @ByVal Attrs Shape(@ByVal PartialTensorShape x); public native @ByRef PartialTensorShape shape_(); public native Attrs shape_(PartialTensorShape shape_); } public Placeholder(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, dtype); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype); public Placeholder(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Shape(@ByVal PartialTensorShape x); public native @ByRef Operation operation(); public native Placeholder operation(Operation operation); public native @ByRef Output output(); public native Placeholder output(Output output); } /** A placeholder op that passes through {@code input} when its output is not fed. * * Arguments: * * scope: A Scope object * * input: The default value to produce when {@code output} is not fed. * * shape: The (possibly partial) shape of the tensor. * * Returns: * * {@code Output}: A placeholder tensor that defaults to {@code input} if it is not fed. */ @Namespace("tensorflow::ops") @NoOffset public static class PlaceholderWithDefault extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PlaceholderWithDefault(Pointer p) { super(p); } public PlaceholderWithDefault(@Const @ByRef Scope scope, @ByVal Input input, @ByVal PartialTensorShape shape) { super((Pointer)null); allocate(scope, input, shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal PartialTensorShape shape); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native PlaceholderWithDefault operation(Operation operation); public native @ByRef Output output(); public native PlaceholderWithDefault output(Output output); } /** An identity op that triggers an error if a gradient is requested. * * When executed in a graph, this op outputs its input tensor as-is. * * When building ops to compute gradients, the TensorFlow gradient system * will return an error when trying to lookup the gradient of this op, * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. * * Arguments: * * scope: A Scope object * * input: any tensor. * * Optional attributes (see {@code Attrs}): * * message: Will be printed in the error when anyone tries to differentiate * this operation. * * Returns: * * {@code Output}: the same input tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class PreventGradient extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public PreventGradient(Pointer p) { super(p); } /** Optional attribute setters for PreventGradient */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Will be printed in the error when anyone tries to differentiate * this operation. * * Defaults to "" */ public native @ByVal Attrs Message(@StringPiece BytePointer x); public native @ByVal Attrs Message(@StringPiece String x); public native @StringPiece BytePointer message_(); public native Attrs message_(BytePointer message_); } public PreventGradient(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public PreventGradient(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Message(@StringPiece BytePointer x); public static native @ByVal Attrs Message(@StringPiece String x); public native @ByRef Operation operation(); public native PreventGradient operation(Operation operation); public native @ByRef Output output(); public native PreventGradient output(Output output); } /** Quantizes then dequantizes a tensor. * * This op simulates the precision loss from the quantized forward pass by: * * 1. Quantizing the tensor to fixed point numbers, which should match the target * quantization method when it is used in inference. * 2. Dequantizing it back to floating point numbers for the following ops, most * likely matmul. * * There are different ways to quantize. This version uses only scaling, so 0.0 * maps to 0. * * From the specified 'num_bits' in the quantized output type, it determines * minimum and maximum representable quantized values. * * e.g. * * * [-128, 127] for signed, num_bits = 8, or * * [0, 255] for unsigned, num_bits = 8. * * If range_given == False, the initial input_min, input_max will be determined * automatically as the minimum and maximum values in the input tensor, otherwise * the specified values of input_min, input_max are used. * * Note: If the input_min, input_max are specified, they do not need to equal the * actual minimum and maximum values in the tensor. e.g. in some cases it may be * beneficial to specify these values such that the low probability extremes of the * input distribution are clipped. * * This op determines the maximum scale_factor that would map the initial * [input_min, input_max] range to a range that lies within the representable * quantized range. * * It determines the scale from one of input_min and input_max, then updates the * other one to maximize the respresentable range. * * e.g. 
* * * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, * 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it * would update input_max to be 127 / 12.8 = 9.921875 * * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, * 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it * would update input_min to be 128.0 / 12.7 = -10.07874 * * if the output is unsigned, input_min is forced to be 0, and only the * specified input_max is used. * * After determining the scale_factor and updating the input range, it applies the * following to each value in the 'input' tensor. * * output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor. * * * Arguments: * * scope: A Scope object * * input: Tensor to quantize and then dequantize. * * input_min: If {@code range_given == True}, this specifies the minimum input value that needs to * be represented, otherwise it is determined from the min value of the {@code input} * tensor. * * input_max: If {@code range_given == True}, this specifies the maximum input value that needs to * be represented, otherwise it is determined from the max value of the {@code input} * tensor. * * Optional attributes (see {@code Attrs}): * * signed_input: Whether the quantization is signed or unsigned. (actually this parameter should * have been called {@code signed_output}) * * num_bits: The bitwidth of the quantization. * * range_given: Whether the range is given or should be determined from the {@code input} tensor. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizeAndDequantizeV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizeAndDequantizeV2(Pointer p) { super(p); } /** Optional attribute setters for QuantizeAndDequantizeV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Whether the quantization is signed or unsigned. (actually this parameter should * have been called {@code signed_output}) * * Defaults to true */ /// public native @ByVal Attrs SignedInput(@Cast("bool") boolean x); /** The bitwidth of the quantization. * * Defaults to 8 */ /// public native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x); /** Whether the range is given or should be determined from the {@code input} tensor. 
* * Defaults to false */ public native @ByVal Attrs RangeGiven(@Cast("bool") boolean x); public native @Cast("bool") boolean signed_input_(); public native Attrs signed_input_(boolean signed_input_); public native @Cast("tensorflow::int64") long num_bits_(); public native Attrs num_bits_(long num_bits_); public native @Cast("bool") boolean range_given_(); public native Attrs range_given_(boolean range_given_); } public QuantizeAndDequantizeV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max) { super((Pointer)null); allocate(scope, input, input_min, input_max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max); public QuantizeAndDequantizeV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, input_min, input_max, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs SignedInput(@Cast("bool") boolean x); public static native @ByVal Attrs NumBits(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs RangeGiven(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native QuantizeAndDequantizeV2 operation(Operation operation); public native @ByRef Output output(); public native QuantizeAndDequantizeV2 output(Output output); } /** Quantizes then dequantizes a tensor. * * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizeAndDequantizeV3 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizeAndDequantizeV3(Pointer p) { super(p); } /** Optional attribute setters for QuantizeAndDequantizeV3 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to true */ public native @ByVal Attrs SignedInput(@Cast("bool") boolean x); /** Defaults to true */ public native @ByVal Attrs RangeGiven(@Cast("bool") boolean x); public native @Cast("bool") boolean signed_input_(); public native Attrs signed_input_(boolean signed_input_); public native @Cast("bool") boolean range_given_(); public native Attrs range_given_(boolean range_given_); } public QuantizeAndDequantizeV3(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @ByVal Input num_bits) { super((Pointer)null); allocate(scope, input, input_min, input_max, num_bits); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @ByVal Input num_bits); public QuantizeAndDequantizeV3(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @ByVal Input num_bits, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, input_min, input_max, num_bits, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @ByVal Input num_bits, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs SignedInput(@Cast("bool") boolean x); public static native @ByVal Attrs RangeGiven(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native QuantizeAndDequantizeV3 operation(Operation operation); public native @ByRef Output output(); public native QuantizeAndDequantizeV3 output(Output output); } /** Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. * * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * *
{@code
 *  out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
 *  if T == qint8, out[i] -= (range(T) + 1) / 2.0
 *  }
 * 
 *  here {@code range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()}
 * 
 *  *MIN_COMBINED Mode Example*
 * 
 *  Assume the input is type float and has a possible range of [0.0, 6.0] and the
 *  output type is quint8 ([0, 255]). The min_range and max_range values should be
 *  specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
 *  value of the input by 255/6 and cast to quint8.
 * 
 *  If the output type was qint8 ([-128, 127]), the operation will additionally
 *  subtract each value by 128 prior to casting, so that the range of values aligns
 *  with the range of qint8.
 * 
 *  If the mode is 'MIN_FIRST', then this approach is used:
 * 
{@code
 *  num_discrete_values = 1 << (# of bits in T)
 *  range_adjust = num_discrete_values / (num_discrete_values - 1)
 *  range = (range_max - range_min) * range_adjust
 *  range_scale = num_discrete_values / range
 *  quantized = round(input * range_scale) - round(range_min * range_scale) +
 *    numeric_limits<T>::min()
 *  quantized = max(quantized, numeric_limits<T>::min())
 *  quantized = min(quantized, numeric_limits<T>::max())
 *  }
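 * 
 *  A worked instance of these MIN_FIRST formulas (illustrative, reusing the
 *  [0.0, 6.0] float range and quint8 output from the MIN_COMBINED example):
 * 
{@code
 *  num_discrete_values = 1 << 8 = 256
 *  range_adjust = 256 / 255
 *  range = (6.0 - 0.0) * 256 / 255 ~= 6.0235
 *  range_scale = 256 / 6.0235 = 42.5
 *  # quantizing the input value 3.0:
 *  quantized = round(3.0 * 42.5) - round(0.0 * 42.5) + 0 = 128
 *  }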
 * 
 *  The biggest difference between this and MIN_COMBINED is that the minimum range
 *  is rounded first, before it's subtracted from the rounded value. With
 *  MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
 *  and dequantizing will introduce a larger and larger error.
 * 
 *  *SCALED mode Example*
 * 
 *  {@code SCALED} mode matches the quantization approach used in
 *  {@code QuantizeAndDequantize{V2|V3}}.
 * 
 *  If the mode is {@code SCALED}, we do not use the full range of the output type,
 *  choosing to elide the lowest possible value for symmetry (e.g., output range is
 *  -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
 *  0.
 * 
 *  We first find the range of values in our tensor. The
 *  range we use is always centered on 0, so we find m such that
 * 
{@code c++
 *    m = max(abs(input_min), abs(input_max))
 *  }
 * 
 *  Our input tensor range is then {@code [-m, m]}.
 * 
 *  Next, we choose our fixed-point quantization buckets, {@code [min_fixed, max_fixed]}.
 *  If T is signed, this is
 * 
{@code
 *    num_bits = sizeof(T) * 8
 *    [min_fixed, max_fixed] =
 *        [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
 *  }
 * 
 *  Otherwise, if T is unsigned, the fixed-point range is
 * 
{@code
 *    [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
 *  }
 * 
 *  From this we compute our scaling factor, s:
 * 
{@code c++
 *    s = (max_fixed - min_fixed) / (2 * m)
 *  }
 * 
 *  Now we can quantize the elements of our tensor:
 * 
{@code c++
 *  result = round(input * s)
 *  }
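 * 
 *  As a worked instance of the SCALED formulas (illustrative): for a signed
 *  8-bit output and an input range giving m = 10.0:
 * 
{@code c++
 *    [min_fixed, max_fixed] = [-127, 127]
 *    s = (127 - (-127)) / (2 * 10.0) = 12.7
 *    result = round(0.5 * 12.7) = round(6.35) = 6
 *  }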
* * One thing to watch out for is that the operator may choose to adjust the * requested minimum and maximum values slightly during the quantization process, * so you should always use the output ports as the range for further calculations. * For example, if the requested minimum and maximum values are close to equal, * they will be separated by a small epsilon value to prevent ill-formed quantized * buffers from being created. Otherwise, you can end up with buffers where all the * quantized values map to the same float value, which causes problems for * operations that have to perform further calculations on them. * * Arguments: * * scope: A Scope object * * min_range: The minimum scalar value possibly produced for the input. * * max_range: The maximum scalar value possibly produced for the input. * * Returns: * * {@code Output} output: The quantized data produced from the float input. * * {@code Output} output_min: The actual minimum scalar value used for the output. * * {@code Output} output_max: The actual maximum scalar value used for the output. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizeV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizeV2(Pointer p) { super(p); } /** Optional attribute setters for QuantizeV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to "MIN_COMBINED" */ public native @ByVal Attrs Mode(@StringPiece BytePointer x); public native @ByVal Attrs Mode(@StringPiece String x); /** Defaults to "HALF_AWAY_FROM_ZERO" */ public native @ByVal Attrs RoundMode(@StringPiece BytePointer x); public native @ByVal Attrs RoundMode(@StringPiece String x); public native @StringPiece BytePointer mode_(); public native Attrs mode_(BytePointer mode_); public native @StringPiece BytePointer round_mode_(); public native Attrs round_mode_(BytePointer round_mode_); } public QuantizeV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range, @Cast("tensorflow::DataType") int T) { super((Pointer)null); allocate(scope, input, min_range, max_range, T); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range, @Cast("tensorflow::DataType") int T); public QuantizeV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range, @Cast("tensorflow::DataType") int T, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, min_range, max_range, T, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_range, @ByVal Input max_range, @Cast("tensorflow::DataType") int T, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Mode(@StringPiece BytePointer x); public static native @ByVal Attrs Mode(@StringPiece String x); public static native @ByVal Attrs RoundMode(@StringPiece BytePointer x); public static native @ByVal Attrs 
RoundMode(@StringPiece String x); public native @ByRef Operation operation(); public native QuantizeV2 operation(Operation operation); public native @ByRef Output output(); public native QuantizeV2 output(Output output); public native @ByRef Output output_min(); public native QuantizeV2 output_min(Output output_min); public native @ByRef Output output_max(); public native QuantizeV2 output_max(Output output_max); } /** Concatenates quantized tensors along one dimension. * * Arguments: * * scope: A Scope object * * concat_dim: 0-D. The dimension along which to concatenate. Must be in the * range [0, rank(values)). * * values: The {@code N} Tensors to concatenate. Their ranks and types must match, * and their sizes must match in all dimensions except {@code concat_dim}. * * input_mins: The minimum scalar values for each of the input tensors. * * input_maxes: The maximum scalar values for each of the input tensors. * * Returns: * * {@code Output} output: A {@code Tensor} with the concatenation of values stacked along the * {@code concat_dim} dimension. This tensor's shape matches that of {@code values} except * in {@code concat_dim} where it has the sum of the sizes. * * {@code Output} output_min: The float value that the minimum quantized output value represents. * * {@code Output} output_max: The float value that the maximum quantized output value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedConcat extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedConcat(Pointer p) { super(p); } public QuantizedConcat(@Const @ByRef Scope scope, @ByVal Input concat_dim, @ByVal InputList values, @ByVal InputList input_mins, @ByVal InputList input_maxes) { super((Pointer)null); allocate(scope, concat_dim, values, input_mins, input_maxes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input concat_dim, @ByVal InputList values, @ByVal InputList input_mins, @ByVal InputList input_maxes); public native @ByRef Operation operation(); public native QuantizedConcat operation(Operation operation); public native @ByRef Output output(); public native QuantizedConcat output(Output output); public native @ByRef Output output_min(); public native QuantizedConcat output_min(Output output_min); public native @ByRef Output output_max(); public native QuantizedConcat output_max(Output output_max); } /** Quantized Instance normalization. * * Arguments: * * scope: A Scope object * * x: A 4D input Tensor. * * x_min: The value represented by the lowest quantized input. * * x_max: The value represented by the highest quantized input. * * Optional attributes (see {@code Attrs}): * * output_range_given: If True, {@code given_y_min} and {@code given_y_min} * and {@code given_y_max} are used as the output range. Otherwise, * the implementation computes the output range. * * given_y_min: Output in {@code y_min} if {@code output_range_given} is True. * * given_y_max: Output in {@code y_max} if {@code output_range_given} is True. * * variance_epsilon: A small float number to avoid dividing by 0. * * min_separation: Minimum value of {@code y_max - y_min} * * Returns: * * {@code Output} y: A 4D Tensor. * * {@code Output} y_min: The value represented by the lowest quantized output. * * {@code Output} y_max: The value represented by the highest quantized output. 
*/ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedInstanceNorm extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedInstanceNorm(Pointer p) { super(p); } /** Optional attribute setters for QuantizedInstanceNorm */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, {@code given_y_min} and {@code given_y_min} * and {@code given_y_max} are used as the output range. Otherwise, * the implementation computes the output range. * * Defaults to false */ /// public native @ByVal Attrs OutputRangeGiven(@Cast("bool") boolean x); /** Output in {@code y_min} if {@code output_range_given} is True. * * Defaults to 0 */ /// public native @ByVal Attrs GivenYMin(float x); /** Output in {@code y_max} if {@code output_range_given} is True. * * Defaults to 0 */ /// public native @ByVal Attrs GivenYMax(float x); /** A small float number to avoid dividing by 0. * * Defaults to 1e-05 */ /// public native @ByVal Attrs VarianceEpsilon(float x); /** Minimum value of {@code y_max - y_min} * * Defaults to 0.001 */ public native @ByVal Attrs MinSeparation(float x); public native @Cast("bool") boolean output_range_given_(); public native Attrs output_range_given_(boolean output_range_given_); public native float given_y_min_(); public native Attrs given_y_min_(float given_y_min_); public native float given_y_max_(); public native Attrs given_y_max_(float given_y_max_); public native float variance_epsilon_(); public native Attrs variance_epsilon_(float variance_epsilon_); public native float min_separation_(); public native Attrs min_separation_(float min_separation_); } public QuantizedInstanceNorm(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input x_min, @ByVal Input x_max) { super((Pointer)null); allocate(scope, x, x_min, x_max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input x_min, @ByVal Input x_max); public QuantizedInstanceNorm(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input x_min, @ByVal Input x_max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, x_min, x_max, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input x_min, @ByVal Input x_max, @Const @ByRef Attrs attrs); public static native @ByVal Attrs OutputRangeGiven(@Cast("bool") boolean x); public static native @ByVal Attrs GivenYMin(float x); public static native @ByVal Attrs GivenYMax(float x); public static native @ByVal Attrs VarianceEpsilon(float x); public static native @ByVal Attrs MinSeparation(float x); public native @ByRef Operation operation(); public native QuantizedInstanceNorm operation(Operation operation); public native @ByRef Output y(); public native QuantizedInstanceNorm y(Output y); public native @ByRef Output y_min(); public native QuantizedInstanceNorm y_min(Output y_min); public native @ByRef Output y_max(); public native QuantizedInstanceNorm y_max(Output y_max); } /** Reshapes a 
quantized tensor as per the Reshape op.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * shape: Defines the shape of the output tensor.
 *  * input_min: The minimum value of the input.
 *  * input_max: The maximum value of the input.
 * 
 *  Returns:
 *  * {@code Output} output
 *  * {@code Output} output_min: This value is copied from input_min.
 *  * {@code Output} output_max: This value is copied from input_max. */
@Namespace("tensorflow::ops") @NoOffset public static class QuantizedReshape extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QuantizedReshape(Pointer p) { super(p); }

  public QuantizedReshape(@Const @ByRef Scope scope, @ByVal Input tensor,
                   @ByVal Input shape, @ByVal Input input_min,
                   @ByVal Input input_max) { super((Pointer)null); allocate(scope, tensor, shape, input_min, input_max); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor,
                   @ByVal Input shape, @ByVal Input input_min,
                   @ByVal Input input_max);

  public native @ByRef Operation operation(); public native QuantizedReshape operation(Operation operation);
  public native @ByRef Output output(); public native QuantizedReshape output(Output output);
  public native @ByRef Output output_min(); public native QuantizedReshape output_min(Output output_min);
  public native @ByRef Output output_max(); public native QuantizedReshape output_max(Output output_max);
}
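
// Illustrative usage sketch (not part of the generated bindings): wiring up
// QuantizedReshape from Java, assuming Scope.NewRootScope() mirrors the C++
// API and that placeholders feed the quantized input and its float range.
//
//   Scope scope = Scope.NewRootScope();
//   Placeholder tensor = new Placeholder(scope, DT_QUINT8);
//   Placeholder shape  = new Placeholder(scope, DT_INT32);   // e.g. fed [2, -1]
//   Placeholder inMin  = new Placeholder(scope, DT_FLOAT);   // scalar range
//   Placeholder inMax  = new Placeholder(scope, DT_FLOAT);   // scalar range
//   QuantizedReshape qr = new QuantizedReshape(scope, tensor.asInput(),
//       shape.asInput(), inMin.asInput(), inMax.asInput());
//   Output outMin = qr.output_min();  // copied from inMin
//   Output outMax = qr.output_max();  // copied from inMax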

/** Returns the rank of a tensor.
 * 
 *  This operation returns an integer representing the rank of {@code input}.
 * 
 *  For example:
 * 
 *  
{@code
 *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
 *  # shape of tensor 't' is [2, 2, 3]
 *  rank(t) ==> 3
 *  }
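 * 
 *  Through this wrapper, the same graph can be sketched from Java (illustrative
 *  only, assuming a root {@code Scope}):
 * 
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder t = new Placeholder(scope, DT_INT32);  // e.g. fed a [2, 2, 3] tensor
 *  Rank rank = new Rank(scope, t.asInput());
 *  Output r = rank.output();  // scalar int32; 3 for the shape above
 *  }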
* * **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank * of a tensor is the number of indices required to uniquely select each element * of the tensor. Rank is also known as "order", "degree", or "ndims." * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Rank extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Rank(Pointer p) { super(p); } public Rank(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Rank operation(Operation operation); public native @ByRef Output output(); public native Rank output(Output output); } /** Reshapes a tensor. * * Given {@code tensor}, this operation returns a tensor that has the same values * as {@code tensor} with shape {@code shape}. * * If one component of {@code shape} is the special value -1, the size of that dimension * is computed so that the total size remains constant. In particular, a {@code shape} * of {@code [-1]} flattens into 1-D. At most one component of {@code shape} can be -1. * * If {@code shape} is 1-D or higher, then the operation returns a tensor with shape * {@code shape} filled with the values of {@code tensor}. In this case, the number of elements * implied by {@code shape} must be the same as the number of elements in {@code tensor}. * * For example: * *
{@code
 *  # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
 *  # tensor 't' has shape [9]
 *  reshape(t, [3, 3]) ==> [[1, 2, 3],
 *                          [4, 5, 6],
 *                          [7, 8, 9]]
 * 
 *  # tensor 't' is [[[1, 1], [2, 2]],
 *  #                [[3, 3], [4, 4]]]
 *  # tensor 't' has shape [2, 2, 2]
 *  reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
 *                          [3, 3, 4, 4]]
 * 
 *  # tensor 't' is [[[1, 1, 1],
 *  #                 [2, 2, 2]],
 *  #                [[3, 3, 3],
 *  #                 [4, 4, 4]],
 *  #                [[5, 5, 5],
 *  #                 [6, 6, 6]]]
 *  # tensor 't' has shape [3, 2, 3]
 *  # pass '[-1]' to flatten 't'
 *  reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
 * 
 *  # -1 can also be used to infer the shape
 * 
 *  # -1 is inferred to be 9:
 *  reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
 *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
 *  # -1 is inferred to be 2:
 *  reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
 *                           [4, 4, 4, 5, 5, 5, 6, 6, 6]]
 *  # -1 is inferred to be 3:
 *  reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
 *                                [2, 2, 2],
 *                                [3, 3, 3]],
 *                               [[4, 4, 4],
 *                                [5, 5, 5],
 *                                [6, 6, 6]]]
 * 
 *  # tensor 't' is [7]
 *  # shape `[]` reshapes to a scalar
 *  reshape(t, []) ==> 7
 *  }
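 * 
 *  A minimal Java sketch of the first case above (illustrative; the placeholders
 *  stand in for tensor 't' and the target shape):
 * 
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder t = new Placeholder(scope, DT_INT32);      // fed the 9-element vector
 *  Placeholder shape = new Placeholder(scope, DT_INT32);  // fed the vector [3, 3]
 *  Reshape reshaped = new Reshape(scope, t.asInput(), shape.asInput());
 *  }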
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * shape: Defines the shape of the output tensor.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Reshape extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Reshape(Pointer p) { super(p); }

  public Reshape(@Const @ByRef Scope scope, @ByVal Input tensor,
          @ByVal Input shape) { super((Pointer)null); allocate(scope, tensor, shape); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor,
          @ByVal Input shape);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native Reshape operation(Operation operation);
  public native @ByRef Output output(); public native Reshape output(Output output);
}

/** Assign {@code value} to the sliced l-value reference of {@code ref}.
 * 
 *  The values of {@code value} are assigned to the positions in the variable
 *  {@code ref} that are selected by the slice parameters. The slice parameters
 *  {@code begin}, {@code end}, {@code strides}, etc. work exactly as in
 *  {@code StridedSlice}.
 * 
 *  NOTE this op currently does not support broadcasting and so {@code value}'s
 *  shape must be exactly the shape produced by the slice of {@code ref}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class ResourceStridedSliceAssign extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ResourceStridedSliceAssign(Pointer p) { super(p); }

  /** Optional attribute setters for ResourceStridedSliceAssign */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long begin_mask_(); public native Attrs begin_mask_(long begin_mask_); public native @Cast("tensorflow::int64") long end_mask_(); public native Attrs end_mask_(long end_mask_); public native @Cast("tensorflow::int64") long ellipsis_mask_(); public native Attrs ellipsis_mask_(long ellipsis_mask_); public native @Cast("tensorflow::int64") long new_axis_mask_(); public native Attrs new_axis_mask_(long new_axis_mask_); public native @Cast("tensorflow::int64") long shrink_axis_mask_(); public native Attrs shrink_axis_mask_(long shrink_axis_mask_); } public ResourceStridedSliceAssign(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin, @ByVal Input end, @ByVal Input strides, @ByVal Input value) { super((Pointer)null); allocate(scope, ref, begin, end, strides, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin, @ByVal Input end, @ByVal Input strides, @ByVal Input value); public ResourceStridedSliceAssign(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin, @ByVal Input end, @ByVal Input strides, @ByVal Input value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, begin, end, strides, value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin, @ByVal Input end, @ByVal Input strides, @ByVal Input value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native ResourceStridedSliceAssign operation(Operation operation); } /** Reverses variable length slices. * * This op first slices {@code input} along the dimension {@code batch_dim}, and for each * slice {@code i}, reverses the first {@code seq_lengths[i]} elements along * the dimension {@code seq_dim}. * * The elements of {@code seq_lengths} must obey {@code seq_lengths[i] <= input.dims[seq_dim]}, * and {@code seq_lengths} must be a vector of length {@code input.dims[batch_dim]}. * * The output slice {@code i} along dimension {@code batch_dim} is then given by input * slice {@code i}, with the first {@code seq_lengths[i]} slices along dimension * {@code seq_dim} reversed. * * For example: * *
{@code
 *  # Given this:
 *  batch_dim = 0
 *  seq_dim = 1
 *  input.dims = (4, 8, ...)
 *  seq_lengths = [7, 2, 3, 5]
 * 
 *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
 *  output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
 *  output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
 *  output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
 *  output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
 * 
 *  # while entries past seq_lens are copied through:
 *  output[0, 7:, :, ...] = input[0, 7:, :, ...]
 *  output[1, 2:, :, ...] = input[1, 2:, :, ...]
 *  output[2, 3:, :, ...] = input[2, 3:, :, ...]
 *  output[3, 5:, :, ...] = input[3, 5:, :, ...]
 *  }
 * 
 *  In contrast, if:
 * 
{@code
 *  # Given this:
 *  batch_dim = 2
 *  seq_dim = 0
 *  input.dims = (8, ?, 4, ...)
 *  seq_lengths = [7, 2, 3, 5]
 * 
 *  # then slices of input are reversed on seq_dim, but only up to seq_lengths:
 *  output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
 *  output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
 *  output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
 *  output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
 * 
 *  # while entries past seq_lens are copied through:
 *  output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
 *  output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
 *  output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
 *  output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
 *  }
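 * 
 *  A minimal Java sketch of the first example (illustrative; {@code seq_dim} is
 *  a plain {@code long} in the constructor below, and {@code BatchDim} is
 *  optional):
 * 
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder input = new Placeholder(scope, DT_FLOAT);        // dims (4, 8, ...)
 *  Placeholder seq_lengths = new Placeholder(scope, DT_INT64);  // fed [7, 2, 3, 5]
 *  ReverseSequence rs = new ReverseSequence(scope, input.asInput(),
 *      seq_lengths.asInput(), 1,  // seq_dim = 1
 *      new ReverseSequence.Attrs().BatchDim(0));
 *  }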
* * Arguments: * * scope: A Scope object * * input: The input to reverse. * * seq_lengths: 1-D with length {@code input.dims(batch_dim)} and * {@code max(seq_lengths) <= input.dims(seq_dim)} * * seq_dim: The dimension which is partially reversed. * * Optional attributes (see {@code Attrs}): * * batch_dim: The dimension along which reversal is performed. * * Returns: * * {@code Output}: The partially reversed input. It has the same shape as {@code input}. */ @Namespace("tensorflow::ops") @NoOffset public static class ReverseSequence extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReverseSequence(Pointer p) { super(p); } /** Optional attribute setters for ReverseSequence */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The dimension along which reversal is performed. * * Defaults to 0 */ public native @ByVal Attrs BatchDim(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long batch_dim_(); public native Attrs batch_dim_(long batch_dim_); } public ReverseSequence(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input seq_lengths, @Cast("tensorflow::int64") long seq_dim) { super((Pointer)null); allocate(scope, input, seq_lengths, seq_dim); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input seq_lengths, @Cast("tensorflow::int64") long seq_dim); public ReverseSequence(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input seq_lengths, @Cast("tensorflow::int64") long seq_dim, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, seq_lengths, seq_dim, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input seq_lengths, @Cast("tensorflow::int64") long seq_dim, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs BatchDim(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native ReverseSequence operation(Operation operation); public native @ByRef Output output(); public native ReverseSequence output(Output output); } /** Reverses specific dimensions of a tensor. * * NOTE {@code tf.reverse} has now changed behavior in preparation for 1.0. * {@code tf.reverse_v2} is currently an alias that will be deprecated before TF 1.0. * * Given a {@code tensor}, and a {@code int32} tensor {@code axis} representing the set of * dimensions of {@code tensor} to reverse. This operation reverses each dimension * {@code i} for which there exists {@code j} s.t. {@code axis[j] == i}. * * {@code tensor} can have up to 8 dimensions. The number of dimensions specified * in {@code axis} may be 0 or more entries. If an index is specified more than * once, a InvalidArgument error is raised. * * For example: * *
{@code
 *  # tensor 't' is [[[[ 0,  1,  2,  3],
 *  #                  [ 4,  5,  6,  7],
 *  #                  [ 8,  9, 10, 11]],
 *  #                 [[12, 13, 14, 15],
 *  #                  [16, 17, 18, 19],
 *  #                  [20, 21, 22, 23]]]]
 *  # tensor 't' shape is [1, 2, 3, 4]
 * 
 *  # 'dims' is [3] or 'dims' is [-1]
 *  reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
 *                          [ 7,  6,  5,  4],
 *                          [11, 10,  9,  8]],
 *                         [[15, 14, 13, 12],
 *                          [19, 18, 17, 16],
 *                          [23, 22, 21, 20]]]]
 * 
 *  # 'dims' is '[1]' (or 'dims' is '[-3]')
 *  reverse(t, dims) ==> [[[[12, 13, 14, 15],
 *                          [16, 17, 18, 19],
 *                          [20, 21, 22, 23]],
 *                         [[ 0,  1,  2,  3],
 *                          [ 4,  5,  6,  7],
 *                          [ 8,  9, 10, 11]]]]
 * 
 *  # 'dims' is '[2]' (or 'dims' is '[-2]')
 *  reverse(t, dims) ==> [[[[8, 9, 10, 11],
 *                          [4, 5, 6, 7],
 *                          [0, 1, 2, 3]],
 *                         [[20, 21, 22, 23],
 *                          [16, 17, 18, 19],
 *                          [12, 13, 14, 15]]]]
 *  }
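 * 
 *  Driving this op from Java (an illustrative sketch; 'dims' is fed through the
 *  {@code axis} input):
 * 
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder t = new Placeholder(scope, DT_INT32);     // fed the [1, 2, 3, 4] tensor
 *  Placeholder dims = new Placeholder(scope, DT_INT32);  // fed [3], say
 *  Reverse reversed = new Reverse(scope, t.asInput(), dims.asInput());
 *  }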
* * Arguments: * * scope: A Scope object * * tensor: Up to 8-D. * * axis: 1-D. The indices of the dimensions to reverse. Must be in the range * {@code [-rank(tensor), rank(tensor))}. * * Returns: * * {@code Output}: The same shape as {@code tensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class Reverse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Reverse(Pointer p) { super(p); } public Reverse(@Const @ByRef Scope scope, @ByVal Input tensor, @ByVal Input axis) { super((Pointer)null); allocate(scope, tensor, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor, @ByVal Input axis); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Reverse operation(Operation operation); public native @ByRef Output output(); public native Reverse output(Output output); } /** Scatter {@code updates} into a new tensor according to {@code indices}. * * Creates a new tensor by applying sparse {@code updates} to individual values or * slices within a tensor (initially zero for numeric, empty for string) of * the given {@code shape} according to indices. This operator is the inverse of the * {@code tf.gather_nd} operator which extracts values or slices from a given tensor. * * If {@code indices} contains duplicates, then their updates are accumulated (summed). * * **WARNING**: The order in which updates are applied is nondeterministic, so the * output will be nondeterministic if {@code indices} contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. * * {@code indices} is an integer tensor containing indices into a new tensor of shape * {@code shape}. The last dimension of {@code indices} can be at most the rank of {@code shape}: * * indices.shape[-1] <= shape.rank * * The last dimension of {@code indices} corresponds to indices into elements * (if {@code indices.shape[-1] = shape.rank}) or slices * (if {@code indices.shape[-1] < shape.rank}) along dimension {@code indices.shape[-1]} of * {@code shape}. {@code updates} is a tensor with shape * * indices.shape[:-1] + shape[indices.shape[-1]:] * * The simplest form of scatter is to insert individual elements in a tensor by * index. For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. * *
 * 
 *  In Python, this scatter operation would look like this:
 * 
{@code python
 *      indices = tf.constant([[4], [3], [1], [7]])
 *      updates = tf.constant([9, 10, 11, 12])
 *      shape = tf.constant([8])
 *      scatter = tf.scatter_nd(indices, updates, shape)
 *      with tf.Session() as sess:
 *        print(sess.run(scatter))
 *  }
 * 
 *  The resulting tensor would look like this:
 * 
 *      [0, 11, 0, 10, 9, 0, 0, 12]
 * 
 *  We can also insert entire slices of a higher rank tensor all at once. For
 *  example, if we wanted to insert two slices in the first dimension of a
 *  rank-3 tensor with two matrices of new values.
 * 
 *  In Python, this scatter operation would look like this (an illustrative Java
 *  sketch using this wrapper follows the block):
 * 
{@code python
 *      indices = tf.constant([[0], [2]])
 *      updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
 *                              [7, 7, 7, 7], [8, 8, 8, 8]],
 *                             [[5, 5, 5, 5], [6, 6, 6, 6],
 *                              [7, 7, 7, 7], [8, 8, 8, 8]]])
 *      shape = tf.constant([4, 4, 4])
 *      scatter = tf.scatter_nd(indices, updates, shape)
 *      with tf.Session() as sess:
 *        print(sess.run(scatter))
 *  }
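 * 
 *  The same graph sketched through this JavaCPP wrapper (illustrative only; the
 *  placeholders stand in for the constants above, so the result shown next
 *  applies to it as well):
 * 
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder indices = new Placeholder(scope, DT_INT32);  // e.g. fed [[0], [2]]
 *  Placeholder updates = new Placeholder(scope, DT_INT32);
 *  Placeholder shape = new Placeholder(scope, DT_INT32);    // e.g. fed [4, 4, 4]
 *  ScatterNd scatter = new ScatterNd(scope, indices.asInput(),
 *                                    updates.asInput(), shape.asInput());
 *  }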
* * The resulting tensor would look like this: * * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * * Arguments: * * scope: A Scope object * * indices: Index tensor. * * updates: Updates to scatter into output. * * shape: 1-D. The shape of the resulting tensor. * * Returns: * * {@code Output}: A new tensor with the given shape and updates applied according * to the indices. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterNd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterNd(Pointer p) { super(p); } public ScatterNd(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input updates, @ByVal Input shape) { super((Pointer)null); allocate(scope, indices, updates, shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input updates, @ByVal Input shape); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ScatterNd operation(Operation operation); public native @ByRef Output output(); public native ScatterNd output(Output output); } /** Applies sparse addition to {@code input} using individual values or slices * * from {@code updates} according to indices {@code indices}. The updates are non-aliasing: * {@code input} is only modified in-place if no other operations will use it. * Otherwise, a copy of {@code input} is made. This operation has a gradient with * respect to both {@code input} and {@code updates}. * * {@code input} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. * * {@code indices} must be integer tensor, containing indices into {@code input}. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where {@code 0 < K <= P}. * * The innermost dimension of {@code indices} (with length {@code K}) corresponds to * indices into elements (if {@code K = P}) or {@code (P-K)}-dimensional slices * (if {@code K < P}) along the {@code K}th dimension of {@code input}. * * {@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: * * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ * * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 * elements. In Python, that addition would look like this: * * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) * indices = tf.constant([[4], [3], [1], [7]]) * updates = tf.constant([9, 10, 11, 12]) * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) * with tf.Session() as sess: * print(sess.run(output)) * * The resulting value {@code output} would look like this: * * [1, 13, 3, 14, 14, 6, 7, 20] * * See {@code tf.scatter_nd} for more details about how to make updates to slices. * * Arguments: * * scope: A Scope object * * input: A Tensor. * * indices: A Tensor. Must be one of the following types: {@code int32}, {@code int64}. * A tensor of indices into {@code input}. * * updates: A Tensor. Must have the same type as ref. A tensor of updated values * to add to {@code input}. 
/** Applies sparse addition to {@code input} using individual values or slices
 * 
 *  from {@code updates} according to indices {@code indices}.  The updates are non-aliasing:
 *  {@code input} is only modified in-place if no other operations will use it.
 *  Otherwise, a copy of {@code input} is made.  This operation has a gradient with
 *  respect to both {@code input} and {@code updates}.
 * 
 *  {@code input} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}.
 * 
 *  {@code indices} must be an integer tensor, containing indices into {@code input}.
 *  It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where {@code 0 < K <= P}.
 * 
 *  The innermost dimension of {@code indices} (with length {@code K}) corresponds to
 *  indices into elements (if {@code K = P}) or {@code (P-K)}-dimensional slices
 *  (if {@code K < P}) along the {@code K}th dimension of {@code input}.
 * 
 *  {@code updates} is a {@code Tensor} of rank {@code Q-1+P-K} with shape:
 * 
 *  $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
 * 
 *  For example, say we want to add 4 scattered elements to a rank-1 tensor with 8
 *  elements.  In Python, that addition would look like this:
 * 
 *  {@code
 *  input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
 *  indices = tf.constant([[4], [3], [1], [7]])
 *  updates = tf.constant([9, 10, 11, 12])
 *  output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
 *  with tf.Session() as sess:
 *    print(sess.run(output))
 *  }
 * 
 *  The resulting value {@code output} would look like this:
 * 
 *  {@code
 *  [1, 13, 3, 14, 14, 6, 7, 20]
 *  }
 * 
 *  See {@code tf.scatter_nd} for more details about how to make updates to slices.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: A Tensor.
 *  * indices: A Tensor. Must be one of the following types: {@code int32}, {@code int64}.
 *  A tensor of indices into {@code input}.
 *  * updates: A Tensor. Must have the same type as ref. A tensor of updated values
 *  to add to {@code input}.
 * 
 *  Returns:
 *  * {@code Output}: A {@code Tensor} with the same shape as {@code input}, containing values of {@code input}
 *  updated with {@code updates}. */
@Namespace("tensorflow::ops") @NoOffset public static class ScatterNdNonAliasingAdd extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ScatterNdNonAliasingAdd(Pointer p) { super(p); }

  public ScatterNdNonAliasingAdd(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input indices,
                   @ByVal Input updates) { super((Pointer)null); allocate(scope, input, indices, updates); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input indices,
                   @ByVal Input updates);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native ScatterNdNonAliasingAdd operation(Operation operation);
  public native @ByRef Output output(); public native ScatterNdNonAliasingAdd output(Output output);
}

/** Returns the shape of a tensor.
 * 
 *  This operation returns a 1-D integer tensor representing the shape of {@code input}.
 * 
 *  For example:
 * 
{@code
 *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
 *  shape(t) ==> [2, 2, 3]
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */

/** Returns shape of tensors.
 * 
 *  This operation returns N 1-D integer tensors representing shape of {@code input[i]s}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code OutputList}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class ShapeN extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ShapeN(Pointer p) { super(p); }

  /** Optional attribute setters for ShapeN */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to DT_INT32 */
    public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x);

    public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_);
  }
  public ShapeN(@Const @ByRef Scope scope, @ByVal InputList input) { super((Pointer)null); allocate(scope, input); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal InputList input);
  public ShapeN(@Const @ByRef Scope scope, @ByVal InputList input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal InputList input, @Const @ByRef Attrs attrs);

  public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

  public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native ShapeN operation(Operation operation);
  public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output(); public native ShapeN output(OutputVector output);
}

/** Returns the size of a tensor.
 * 
 *  This operation returns an integer representing the number of elements in
 *  {@code input}.
 * 
 *  For example:
 * 
{@code
 *  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
 *  size(t) ==> 12
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Size extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Size(Pointer p) { super(p); }

  /** Optional attribute setters for Size */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to DT_INT32 */
    public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x);

    public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_);
  }
  public Size(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);
  public Size(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs);

  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native Size operation(Operation operation);
  public native @ByRef Output output(); public native Size output(Output output);
}
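/* Editorial usage sketch for {@code Size} -- not part of the generated
 * bindings. Shows the optional-attribute builder pattern shared by most ops in
 * this file: the nested {@code Attrs} selects a 64-bit result type. Assumes
 * {@code Scope.NewRootScope()} and {@code Input}-wrapping of a hypothetical
 * Tensor {@code someTensor} behave as in the C++ API, and that the {@code DT_INT64}
 * constant from these bindings is in scope:
 *
 *   Scope scope = Scope.NewRootScope();
 *   Size size = new Size(scope, new Input(someTensor), Size.OutType(DT_INT64));
 *   Output n = size.output();   // scalar element count
 */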
/** Return a slice from 'input'.
 * 
 *  The output tensor is a tensor with dimensions described by 'size'
 *  whose values are extracted from 'input' starting at the offsets in
 *  'begin'.
 * 
 *  *Requirements*:
 *    0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * begin: begin[i] specifies the offset into the 'i'th dimension of
 *  'input' to slice from.
 *  * size: size[i] specifies the number of elements of the 'i'th dimension
 *  of 'input' to slice. If size[i] is -1, all remaining elements in dimension
 *  i are included in the slice (i.e. this is equivalent to setting
 *  size[i] = input.dim_size(i) - begin[i]).
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Slice extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Slice(Pointer p) { super(p); }

  public Slice(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input begin,
           @ByVal Input size) { super((Pointer)null); allocate(scope, input, begin, size); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input begin,
           @ByVal Input size);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native Slice operation(Operation operation);
  public native @ByRef Output output(); public native Slice output(Output output);
}

/** Returns a copy of the input tensor.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Snapshot extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Snapshot(Pointer p) { super(p); }

  public Snapshot(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native Snapshot operation(Operation operation);
  public native @ByRef Output output(); public native Snapshot output(Output output);
}
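/* Editorial usage sketch for {@code Slice} -- not part of the generated
 * bindings. Extracts a 1x2 window starting at row 0, column 1 of a 2-D tensor;
 * the Tensor variables are hypothetical and Input-wrapping is assumed to
 * mirror the C++ API:
 *
 *   Scope scope = Scope.NewRootScope();
 *   // beginT = [0, 1], sizeT = [1, 2]; a size of -1 would take the rest of a dimension
 *   Slice window = new Slice(scope, new Input(inputT), new Input(beginT), new Input(sizeT));
 *   Output out = window.asOutput();
 */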
/** SpaceToBatch for 4-D tensors of type T.
 * 
 *  This is a legacy version of the more general SpaceToBatchND.
 * 
 *  Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
 *  More specifically, this op outputs a copy of the input tensor where values from
 *  the {@code height} and {@code width} dimensions are moved to the {@code batch} dimension. After
 *  the zero-padding, both {@code height} and {@code width} of the input must be divisible by the
 *  block size.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 4-D with shape {@code [batch, height, width, depth]}.
 *  * paddings: 2-D tensor of non-negative integers with shape {@code [2, 2]}. It specifies
 *  the padding of the input with zeros across the spatial dimensions as follows:
 * 
 *        paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
 * 
 *  The effective spatial dimensions of the zero-padded input tensor will be:
 * 
 *        height_pad = pad_top + height + pad_bottom
 *        width_pad = pad_left + width + pad_right
 * 
 *  The attr {@code block_size} must be greater than one. It indicates the block size.
 * 
 *  * Non-overlapping blocks of size {@code block_size x block_size} in the height and
 *    width dimensions are rearranged into the batch dimension at each location.
 *  * The batch of the output tensor is {@code batch * block_size * block_size}.
 *  * Both height_pad and width_pad must be divisible by block_size.
 * 
 *  The shape of the output will be:
 * 
 *      [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
 *       depth]
 * 
 *  Some examples:
 * 
 *  (1) For the following input of shape {@code [1, 2, 2, 1]} and block_size of 2:
 * 
{@code
 *  x = [[[[1], [2]], [[3], [4]]]]
 *  }
 * 
 *  The output tensor has shape {@code [4, 1, 1, 1]} and value:
 * 
{@code
 *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
 *  }
 * 
 *  (2) For the following input of shape {@code [1, 2, 2, 3]} and block_size of 2:
 * 
{@code
 *  x = [[[[1, 2, 3], [4, 5, 6]],
 *        [[7, 8, 9], [10, 11, 12]]]]
 *  }
 * 
 *  The output tensor has shape {@code [4, 1, 1, 3]} and value:
 * 
{@code
 *  [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
 *  }
 * 
 *  (3) For the following input of shape {@code [1, 4, 4, 1]} and block_size of 2:
 * 
{@code
 *  x = [[[[1],   [2],  [3],  [4]],
 *        [[5],   [6],  [7],  [8]],
 *        [[9],  [10], [11],  [12]],
 *        [[13], [14], [15],  [16]]]]
 *  }
 * 
 *  The output tensor has shape {@code [4, 2, 2, 1]} and value:
 * 
{@code
 *  x = [[[[1], [3]], [[9], [11]]],
 *       [[[2], [4]], [[10], [12]]],
 *       [[[5], [7]], [[13], [15]]],
 *       [[[6], [8]], [[14], [16]]]]
 *  }
 * 
 *  (4) For the following input of shape {@code [2, 2, 4, 1]} and block_size of 2:
 * 
{@code
 *  x = [[[[1],   [2],  [3],  [4]],
 *        [[5],   [6],  [7],  [8]]],
 *       [[[9],  [10], [11],  [12]],
 *        [[13], [14], [15],  [16]]]]
 *  }
 * 
 *  The output tensor has shape {@code [8, 1, 2, 1]} and value:
 * 
{@code
 *  x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
 *       [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
 *  }
 * 
 *  Among others, this operation is useful for reducing atrous convolution into
 *  regular convolution.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SpaceToBatch extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SpaceToBatch(Pointer p) { super(p); }

  public SpaceToBatch(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings,
               @Cast("tensorflow::int64") long block_size) { super((Pointer)null); allocate(scope, input, paddings, block_size); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings,
               @Cast("tensorflow::int64") long block_size);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native SpaceToBatch operation(Operation operation);
  public native @ByRef Output output(); public native SpaceToBatch output(Output output);
}
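/* Editorial usage sketch for {@code SpaceToBatch} -- not part of the generated
 * bindings. Mirrors example (1) above: a [1, 2, 2, 1] input, no padding,
 * block_size 2, yielding a [4, 1, 1, 1] output. The Tensor variables are
 * hypothetical and Input-wrapping is assumed to mirror the C++ API:
 *
 *   Scope scope = Scope.NewRootScope();
 *   // paddingsT = [[0, 0], [0, 0]]
 *   SpaceToBatch s2b = new SpaceToBatch(scope, new Input(inputT), new Input(paddingsT), 2);
 *   Output out = s2b.asOutput();
 */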
/** SpaceToBatch for N-D tensors of type T.
 * 
 *  This operation divides "spatial" dimensions {@code [1, ..., M]} of the input into a
 *  grid of blocks of shape {@code block_shape}, and interleaves these blocks with the
 *  "batch" dimension (0) such that in the output, the spatial dimensions
 *  {@code [1, ..., M]} correspond to the position within the grid, and the batch
 *  dimension combines both the position within a spatial block and the original
 *  batch position.  Prior to division into blocks, the spatial dimensions of the
 *  input are optionally zero padded according to {@code paddings}.  See below for a
 *  precise description.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: N-D with shape {@code input_shape = [batch] + spatial_shape + remaining_shape},
 *  where spatial_shape has {@code M} dimensions.
 *  * block_shape: 1-D with shape {@code [M]}, all values must be >= 1.
 *  * paddings: 2-D with shape {@code [M, 2]}, all values must be >= 0.
 *  {@code paddings[i] = [pad_start, pad_end]} specifies the padding for input dimension
 *  {@code i + 1}, which corresponds to spatial dimension {@code i}.  It is required that
 *  {@code block_shape[i]} divides {@code input_shape[i + 1] + pad_start + pad_end}.
 * 
 *  This operation is equivalent to the following steps:
 * 
 *  1. Zero-pad the start and end of dimensions {@code [1, ..., M]} of the
 *     input according to {@code paddings} to produce {@code padded} of shape {@code padded_shape}.
 * 
 *  2. Reshape {@code padded} to {@code reshaped_padded} of shape:
 * 
 *       [batch] +
 *       [padded_shape[1] / block_shape[0],
 *        block_shape[0],
 *        ...,
 *        padded_shape[M] / block_shape[M-1],
 *        block_shape[M-1]] +
 *       remaining_shape
 * 
 *  3. Permute dimensions of {@code reshaped_padded} to produce
 *     {@code permuted_reshaped_padded} of shape:
 * 
 *       block_shape +
 *       [batch] +
 *       [padded_shape[1] / block_shape[0],
 *        ...,
 *        padded_shape[M] / block_shape[M-1]] +
 *       remaining_shape
 * 
 *  4. Reshape {@code permuted_reshaped_padded} to flatten {@code block_shape} into the batch
 *     dimension, producing an output tensor of shape:
 * 
 *       [batch * prod(block_shape)] +
 *       [padded_shape[1] / block_shape[0],
 *        ...,
 *        padded_shape[M] / block_shape[M-1]] +
 *       remaining_shape
 * 
 *  Some examples:
 * 
 *  (1) For the following input of shape {@code [1, 2, 2, 1]}, {@code block_shape = [2, 2]}, and
 *      {@code paddings = [[0, 0], [0, 0]]}:
 * 
{@code
 *  x = [[[[1], [2]], [[3], [4]]]]
 *  }
 * 
 *  The output tensor has shape {@code [4, 1, 1, 1]} and value:
 * 
{@code
 *  [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
 *  }
 * 
 *  (2) For the following input of shape {@code [1, 2, 2, 3]}, {@code block_shape = [2, 2]}, and
 *      {@code paddings = [[0, 0], [0, 0]]}:
 * 
{@code
 *  x = [[[[1, 2, 3], [4, 5, 6]],
 *        [[7, 8, 9], [10, 11, 12]]]]
 *  }
 * 
 *  The output tensor has shape {@code [4, 1, 1, 3]} and value:
 * 
{@code
 *  [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
 *  }
 * 
 *  (3) For the following input of shape {@code [1, 4, 4, 1]}, {@code block_shape = [2, 2]}, and
 *      {@code paddings = [[0, 0], [0, 0]]}:
 * 
{@code
 *  x = [[[[1],   [2],  [3],  [4]],
 *        [[5],   [6],  [7],  [8]],
 *        [[9],  [10], [11],  [12]],
 *        [[13], [14], [15],  [16]]]]
 *  }
 * 
 *  The output tensor has shape {@code [4, 2, 2, 1]} and value:
 * 
{@code
 *  x = [[[[1], [3]], [[9], [11]]],
 *       [[[2], [4]], [[10], [12]]],
 *       [[[5], [7]], [[13], [15]]],
 *       [[[6], [8]], [[14], [16]]]]
 *  }
 * 
 *  (4) For the following input of shape {@code [2, 2, 4, 1]}, {@code block_shape = [2, 2]}, and
 *      {@code paddings = [[0, 0], [2, 0]]}:
 * 
{@code
 *  x = [[[[1],   [2],  [3],  [4]],
 *        [[5],   [6],  [7],  [8]]],
 *       [[[9],  [10], [11],  [12]],
 *        [[13], [14], [15],  [16]]]]
 *  }
 * 
 *  The output tensor has shape {@code [8, 1, 3, 1]} and value:
 * 
{@code
 *  x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
 *       [[[0], [2], [4]]], [[[0], [10], [12]]],
 *       [[[0], [5], [7]]], [[[0], [13], [15]]],
 *       [[[0], [6], [8]]], [[[0], [14], [16]]]]
 *  }
 * 
 *  Among others, this operation is useful for reducing atrous convolution into
 *  regular convolution.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SpaceToBatchND extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SpaceToBatchND(Pointer p) { super(p); }

  public SpaceToBatchND(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input block_shape,
                 @ByVal Input paddings) { super((Pointer)null); allocate(scope, input, block_shape, paddings); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input block_shape,
                 @ByVal Input paddings);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native SpaceToBatchND operation(Operation operation);
  public native @ByRef Output output(); public native SpaceToBatchND output(Output output);
}
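/* Editorial usage sketch for {@code SpaceToBatchND} -- not part of the
 * generated bindings. Mirrors example (4) above: block_shape [2, 2] with
 * paddings [[0, 0], [2, 0]]; the Tensor variables are hypothetical and
 * Input-wrapping is assumed to mirror the C++ API:
 *
 *   Scope scope = Scope.NewRootScope();
 *   SpaceToBatchND s2b = new SpaceToBatchND(scope,
 *       new Input(inputT), new Input(blockShapeT), new Input(paddingsT));
 *   Output out = s2b.asOutput();   // shape [8, 1, 3, 1] for the example input
 */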
/** SpaceToDepth for tensors of type T.
 * 
 *  Rearranges blocks of spatial data into depth. More specifically,
 *  this op outputs a copy of the input tensor where values from the {@code height}
 *  and {@code width} dimensions are moved to the {@code depth} dimension.
 *  The attr {@code block_size} indicates the input block size.
 * 
 *  * Non-overlapping blocks of size {@code block_size x block_size} are rearranged
 *    into depth at each location.
 *  * The depth of the output tensor is {@code block_size * block_size * input_depth}.
 *  * The Y, X coordinates within each block of the input become the high order
 *    component of the output channel index.
 *  * The input tensor's height and width must be divisible by block_size.
 * 
 *  The {@code data_format} attr specifies the layout of the input and output tensors
 *  with the following options:
 *    "NHWC": {@code [ batch, height, width, channels ]}
 *    "NCHW": {@code [ batch, channels, height, width ]}
 *    "NCHW_VECT_C":
 *        {@code qint8 [ batch, channels / 4, height, width, 4 ]}
 * 
 *  It is useful to consider the operation as transforming a 6-D Tensor.
 *  e.g. for data_format = NHWC,
 *       Each element in the input tensor can be specified via 6 coordinates,
 *       ordered by decreasing memory layout significance as:
 *       n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
 *                          within the output image, bX, bY means coordinates
 *                          within the input block, iC means input channels).
 *       The output would be a transpose to the following layout:
 *       n,oY,oX,bY,bX,iC
 * 
 *  This operation is useful for resizing the activations between convolutions
 *  (but keeping all data), e.g. instead of pooling. It is also useful for training
 *  purely convolutional models.
 * 
 *  For example, given an input of shape {@code [1, 2, 2, 1]}, data_format = "NHWC" and
 *  block_size = 2:
 * 
{@code
 *  x = [[[[1], [2]],
 *        [[3], [4]]]]
 *  }
 * 
 *  This operation will output a tensor of shape {@code [1, 1, 1, 4]}:
 * 
{@code
 *  [[[[1, 2, 3, 4]]]]
 *  }
 * 
 *  Here, the input has a batch of 1 and each batch element has shape {@code [2, 2, 1]},
 *  the corresponding output will have a single element (i.e. width and height are
 *  both 1) and will have a depth of 4 channels (1 * block_size * block_size).
 *  The output element shape is {@code [1, 1, 4]}.
 * 
 *  For an input tensor with larger depth, here of shape {@code [1, 2, 2, 3]}, e.g.
 * 
{@code
 *  x = [[[[1, 2, 3], [4, 5, 6]],
 *        [[7, 8, 9], [10, 11, 12]]]]
 *  }
 * 
 *  This operation, for block_size of 2, will return the following tensor of shape
 *  {@code [1, 1, 1, 12]}
 * 
{@code
 *  [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
 *  }
 * 
 *  Similarly, for the following input of shape {@code [1 4 4 1]}, and a block size of 2:
 * 
{@code
 *  x = [[[[1],   [2],  [5],  [6]],
 *        [[3],   [4],  [7],  [8]],
 *        [[9],  [10], [13],  [14]],
 *        [[11], [12], [15],  [16]]]]
 *  }
 * 
 *  the operator will return the following tensor of shape {@code [1 2 2 4]}:
 * 
{@code
 *  x = [[[[1, 2, 3, 4],
 *         [5, 6, 7, 8]],
 *        [[9, 10, 11, 12],
 *         [13, 14, 15, 16]]]]
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * block_size: The size of the spatial block.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SpaceToDepth extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SpaceToDepth(Pointer p) { super(p); }

  /** Optional attribute setters for SpaceToDepth */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to "NHWC" */
    public native @ByVal Attrs DataFormat(@StringPiece BytePointer x);
    public native @ByVal Attrs DataFormat(@StringPiece String x);

    public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_);
  }
  public SpaceToDepth(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size) { super((Pointer)null); allocate(scope, input, block_size); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size);
  public SpaceToDepth(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size,
               @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, block_size, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long block_size,
               @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x);
  public static native @ByVal Attrs DataFormat(@StringPiece String x);

  public native @ByRef Operation operation(); public native SpaceToDepth operation(Operation operation);
  public native @ByRef Output output(); public native SpaceToDepth output(Output output);
}
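/* Editorial usage sketch for {@code SpaceToDepth} -- not part of the generated
 * bindings. block_size 2 on a [1, 4, 4, 1] NHWC input gives a [1, 2, 2, 4]
 * output, as in the examples above; the DataFormat attr shows the
 * builder-style optional attribute. {@code inputT} is a hypothetical Tensor:
 *
 *   Scope scope = Scope.NewRootScope();
 *   SpaceToDepth s2d = new SpaceToDepth(scope, new Input(inputT), 2,
 *       SpaceToDepth.DataFormat("NHWC"));
 *   Output out = s2d.asOutput();
 */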
/** Splits a tensor into {@code num_split} tensors along one dimension.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * axis: 0-D.  The dimension along which to split.  Must be in the range
 *  {@code [-rank(value), rank(value))}.
 *  * value: The tensor to split.
 *  * num_split: The number of ways to split.  Must evenly divide
 *  {@code value.shape[split_dim]}.
 * 
 *  Returns:
 *  * {@code OutputList}: They are identically shaped tensors, whose shape matches that of {@code value}
 *  except along {@code axis}, where their sizes are
 *  {@code values.shape[split_dim] / num_split}. */
@Namespace("tensorflow::ops") @NoOffset public static class Split extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Split(Pointer p) { super(p); }

  public Split(@Const @ByRef Scope scope, @ByVal Input axis, @ByVal Input value,
           @Cast("tensorflow::int64") long num_split) { super((Pointer)null); allocate(scope, axis, value, num_split); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input axis, @ByVal Input value,
           @Cast("tensorflow::int64") long num_split);
  public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

  public native @ByRef Operation operation(); public native Split operation(Operation operation);
  public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output(); public native Split output(OutputVector output);
}

/** Splits a tensor into {@code num_split} tensors along one dimension.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * value: The tensor to split.
 *  * size_splits: list containing the sizes of each output tensor along the split
 *  dimension. Must sum to the dimension of value along split_dim.
 *  Can contain one -1 indicating that dimension is to be inferred.
 *  * axis: 0-D.  The dimension along which to split.  Must be in the range
 *  {@code [-rank(value), rank(value))}.
 * 
 *  Returns:
 *  * {@code OutputList}: Tensors whose shape matches that of {@code value}
 *  except along {@code axis}, where their sizes are
 *  {@code size_splits[i]}. */
@Namespace("tensorflow::ops") @NoOffset public static class SplitV extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SplitV(Pointer p) { super(p); }

  public SplitV(@Const @ByRef Scope scope, @ByVal Input value, @ByVal Input size_splits,
           @ByVal Input axis, @Cast("tensorflow::int64") long num_split) { super((Pointer)null); allocate(scope, value, size_splits, axis, num_split); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ByVal Input size_splits,
           @ByVal Input axis, @Cast("tensorflow::int64") long num_split);
  public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

  public native @ByRef Operation operation(); public native SplitV operation(Operation operation);
  public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output(); public native SplitV output(OutputVector output);
}
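/* Editorial usage sketch for {@code Split} -- not part of the generated
 * bindings. Splits a tensor into 3 equal pieces along axis 1 and reads one
 * piece back through the list indexer; {@code axisT} is a hypothetical scalar
 * int32 Tensor and Input-wrapping is assumed as elsewhere:
 *
 *   Scope scope = Scope.NewRootScope();
 *   Split parts = new Split(scope, new Input(axisT), new Input(valueT), 3);
 *   Output second = parts.get(1);   // OutputList access via operator []
 */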
/** Removes dimensions of size 1 from the shape of a tensor.
 * 
 *  Given a tensor {@code input}, this operation returns a tensor of the same type with
 *  all dimensions of size 1 removed. If you don't want to remove all size 1
 *  dimensions, you can remove specific size 1 dimensions by specifying
 *  {@code axis}.
 * 
 *  For example:
 * 
{@code
 *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
 *  shape(squeeze(t)) ==> [2, 3]
 *  }
 * 
 *  Or, to remove specific size 1 dimensions:
 * 
{@code
 *  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
 *  shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: The {@code input} to squeeze.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * axis: If specified, only squeezes the dimensions listed. The dimension
 *  index starts at 0. It is an error to squeeze a dimension that is not 1. Must
 *  be in the range {@code [-rank(input), rank(input))}.
 * 
 *  Returns:
 *  * {@code Output}: Contains the same data as {@code input}, but has one or more dimensions of
 *  size 1 removed. */
@Namespace("tensorflow::ops") @NoOffset public static class Squeeze extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Squeeze(Pointer p) { super(p); }

  /** Optional attribute setters for Squeeze */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** If specified, only squeezes the dimensions listed. The dimension
     *  index starts at 0. It is an error to squeeze a dimension that is not 1. Must
     *  be in the range {@code [-rank(input), rank(input))}.
     * 
     *  Defaults to [] */
    public native @ByVal Attrs Axis(@ArraySlice IntPointer x);
    public native @ByVal Attrs Axis(@ArraySlice IntBuffer x);
    public native @ByVal Attrs Axis(@ArraySlice int... x);

    public native @ArraySlice IntPointer axis_(); public native Attrs axis_(IntPointer axis_);
  }
  public Squeeze(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);
  public Squeeze(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs Axis(@ArraySlice IntPointer x);
  public static native @ByVal Attrs Axis(@ArraySlice IntBuffer x);
  public static native @ByVal Attrs Axis(@ArraySlice int... x);

  public native @ByRef Operation operation(); public native Squeeze operation(Operation operation);
  public native @ByRef Output output(); public native Squeeze output(Output output);
}
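/* Editorial usage sketch for {@code Squeeze} -- not part of the generated
 * bindings. Squeezes only dimensions 2 and 4 of a [1, 2, 1, 3, 1, 1] input,
 * giving [1, 2, 3, 1] as in the second example above; the varargs Axis
 * overload comes from this file, {@code inputT} is a hypothetical Tensor:
 *
 *   Scope scope = Scope.NewRootScope();
 *   Squeeze squeezed = new Squeeze(scope, new Input(inputT), Squeeze.Axis(2, 4));
 *   Output out = squeezed.asOutput();
 */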
/** Stops gradient computation.
 * 
 *  When executed in a graph, this op outputs its input tensor as-is.
 * 
 *  When building ops to compute gradients, this op prevents the contribution of
 *  its inputs to be taken into account.  Normally, the gradient generator adds ops
 *  to a graph to compute the derivatives of a specified 'loss' by recursively
 *  finding out inputs that contributed to its computation.  If you insert this op
 *  in the graph its inputs are masked from the gradient generator.  They are not
 *  taken into account for computing gradients.
 * 
 *  This is useful any time you want to compute a value with TensorFlow but need
 *  to pretend that the value was a constant.  Some examples include:
 * 
 *  *  The *EM* algorithm where the *M-step* should not involve backpropagation
 *     through the output of the *E-step*.
 *  *  Contrastive divergence training of Boltzmann machines where, when
 *     differentiating the energy function, the training must not backpropagate
 *     through the graph that generated the samples from the model.
 *  *  Adversarial training, where no backprop should happen through the adversarial
 *     example generation process.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class StopGradient extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StopGradient(Pointer p) { super(p); }

  public StopGradient(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native StopGradient operation(Operation operation);
  public native @ByRef Output output(); public native StopGradient output(Output output);
}
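/* Editorial usage sketch for {@code StopGradient} -- not part of the generated
 * bindings. The op is an identity in the forward pass; wrapping a value blocks
 * gradients from flowing through it during differentiation. {@code sampledT}
 * is a hypothetical Tensor:
 *
 *   Scope scope = Scope.NewRootScope();
 *   StopGradient frozen = new StopGradient(scope, new Input(sampledT));
 *   // use frozen.asOutput() downstream; gradient generators treat it as a constant
 */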
/** Return a strided slice from {@code input}.
 * 
 *  Note, most python users will want to use the Python {@code Tensor.__getitem__}
 *  or {@code Variable.__getitem__} rather than this op directly.
 * 
 *  The goal of this op is to produce a new tensor with a subset of
 *  the elements from the {@code n} dimensional {@code input} tensor. The subset is chosen using
 *  a sequence of {@code m} sparse range specifications encoded into the arguments
 *  of this function. Note, in some cases
 *  {@code m} could be equal to {@code n}, but this need not be the case. Each
 *  range specification entry can be one of the following:
 * 
 *  - An ellipsis (...). Ellipses are used to imply zero or more
 *    dimensions of full-dimension selection and are produced using
 *    {@code ellipsis_mask}. For example, {@code foo[...]} is the identity slice.
 * 
 *  - A new axis. This is used to insert a new shape=1 dimension and is
 *    produced using {@code new_axis_mask}. For example, {@code foo[:, ...]} where
 *    {@code foo} is shape {@code (3, 4)} produces a {@code (1, 3, 4)} tensor.
 * 
 *  - A range {@code begin:end:stride}. This is used to specify how much to choose from
 *    a given dimension. {@code stride} can be any integer but 0.  {@code begin} is an integer
 *    which represents the index of the first value to select while {@code end} represents
 *    the index of the last value to select. The number of values selected in each
 *    dimension is {@code end - begin} if {@code stride > 0} and {@code begin - end} if {@code stride < 0}.
 *    {@code begin} and {@code end} can be negative where {@code -1} is the last element, {@code -2} is
 *    the second to last. {@code begin_mask} controls whether to replace the explicitly
 *    given {@code begin} with an implicit effective value of {@code 0} if {@code stride > 0} and
 *    {@code -1} if {@code stride < 0}. {@code end_mask} is analogous but produces the number
 *    required to create the largest open interval. For example, given a shape
 *    {@code (3,)} tensor {@code foo[:]}, the effective {@code begin} and {@code end} are {@code 0} and {@code 3}. Do
 *    not assume this is equivalent to {@code foo[0:-1]} which has an effective {@code begin}
 *    and {@code end} of {@code 0} and {@code 2}. Another example is {@code foo[-2::-1]} which reverses the
 *    first dimension of a tensor while dropping the last two (in the original
 *    order elements). For example {@code foo = [1,2,3,4]; foo[-2::-1]} is {@code [4,3]}.
 * 
 *  - A single index. This is used to keep only elements that have a given
 *    index. For example {@code foo[2, :]} on a shape {@code (5,6)} tensor produces a
 *    shape {@code (6,)} tensor. This is encoded in {@code begin} and {@code end} and
 *    {@code shrink_axis_mask}.
 * 
 *  Each conceptual range specification is encoded in the op's argument. This
 *  encoding is best understood by considering a non-trivial example. In
 *  particular,
 *  {@code foo[1, 2:4, None, ..., :-3:-1, :]} will be encoded as
 * 
{@code
{@code
 *  begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
 *  end = [2, 4, x, x, -3, x]
 *  strides = [1, 1, x, x, -1, 1]
 *  begin_mask = 1<<4 | 1 << 5 = 48
 *  end_mask = 1<<5 = 32
 *  ellipsis_mask = 1<<3 = 8
 *  new_axis_mask = 1<<2 = 4
 *  shrink_axis_mask = 1<<0
 *  }
 * 
 *  In this case if {@code foo.shape} is (5, 5, 5, 5, 5, 5) the final shape of
 *  the slice becomes (2, 1, 5, 5, 2, 5).
 *  Let us walk step by step through each argument specification.
 * 
 *  1.  The first argument in the example slice is turned into {@code begin = 1} and
 *  {@code end = begin + 1 = 2}. To disambiguate from the original spec {@code 2:4} we
 *  also set the appropriate bit in {@code shrink_axis_mask}.
 * 
 *  2. {@code 2:4} contributes 2, 4, 1 to begin, end, and stride. All masks have
 *  zero bits contributed.
 * 
 *  3. None is a synonym for {@code tf.newaxis}. This means insert a dimension of size 1
 *  dimension in the final shape. Dummy values are contributed to begin,
 *  end and stride, while the new_axis_mask bit is set.
 * 
 *  4. {@code ...} grabs the full ranges from as many dimensions as needed to
 *  fully specify a slice for every dimension of the input shape.
 * 
 *  5. {@code :-3:-1} shows the use of negative indices. A negative index {@code i} associated
 *  with a dimension that has shape {@code s} is converted to a positive index
 *  {@code s + i}. So {@code -1} becomes {@code s-1} (i.e. the last element). This conversion
 *  is done internally so begin, end and strides receive x, -3, and -1.
 *  The appropriate begin_mask bit is set to indicate the start range is the
 *  full range (ignoring the x).
 * 
 *  6. {@code :} indicates that the entire contents of the corresponding dimension
 *  is selected. This is equivalent to {@code ::} or {@code 0::1}. begin, end, and strides
 *  receive 0, 0, and 1, respectively. The appropriate bits in {@code begin_mask} and
 *  {@code end_mask} are also set.
 * 
 *  *Requirements*:
 *    {@code 0 != strides[i] for i in [0, m)}
 *    {@code ellipsis_mask must be a power of two (only one ellipsis)}
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * begin: {@code begin[k]} specifies the offset into the {@code k}th range specification.
 *  The exact dimension this corresponds to will be determined by context.
 *  Out-of-bounds values will be silently clamped. If the {@code k}th bit of
 *  {@code begin_mask} is set then {@code begin[k]} is ignored and the full range of the
 *  appropriate dimension is used instead. Negative values cause indexing
 *  to start from the highest element e.g. If {@code foo==[1,2,3]} then {@code foo[-1]==3}.
 *  * end: {@code end[i]} is like {@code begin} with the exception that {@code end_mask} is
 *  used to determine full ranges.
 *  * strides: {@code strides[i]} specifies the increment in the {@code i}th specification
 *  after extracting a given element. Negative indices will reverse
 *  the original order. Out-of-range values are
 *  clamped to {@code [0,dim[i]) if slice[i]>0} or {@code [-1,dim[i]-1] if slice[i] < 0}
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * begin_mask: a bitmask where a bit i being 1 means to ignore the begin
 *  value and instead use the largest interval possible. At runtime
 *  begin[i] will be replaced with {@code [0, n-1)} if {@code stride[i] > 0} or
 *  {@code [-1, n-1]} if {@code stride[i] < 0}
 *  * end_mask: analogous to {@code begin_mask}
 *  * ellipsis_mask: a bitmask where bit {@code i} being 1 means the {@code i}th
 *  position is actually an ellipsis. One bit at most can be 1.
 *  If {@code ellipsis_mask == 0}, then an implicit ellipsis mask of {@code 1 << (m+1)}
 *  is provided. This means that {@code foo[3:5] == foo[3:5, ...]}. An ellipsis
 *  implicitly creates as many range specifications as necessary to fully
 *  specify the sliced range for every dimension. For example for a 4-dimensional
 *  tensor {@code foo} the slice {@code foo[2, ..., 5:8]} implies {@code foo[2, :, :, 5:8]}.
 *  * new_axis_mask: a bitmask where bit {@code i} being 1 means the {@code i}th
 *  specification creates a new shape 1 dimension. For example
 *  {@code foo[:4, tf.newaxis, :2]} would produce a shape {@code (4, 1, 2)} tensor.
 *  * shrink_axis_mask: a bitmask where bit {@code i} implies that the {@code i}th
 *  specification should shrink the dimensionality. begin and end
 *  must imply a slice of size 1 in the dimension. For example in
 *  python one might do {@code foo[:, 3, :]} which would result in
 *  {@code shrink_axis_mask} being 2.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class StridedSlice extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StridedSlice(Pointer p) { super(p); }

  /** Optional attribute setters for StridedSlice */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** a bitmask where a bit i being 1 means to ignore the begin
     *  value and instead use the largest interval possible. At runtime
     *  begin[i] will be replaced with {@code [0, n-1)} if {@code stride[i] > 0} or
     *  {@code [-1, n-1]} if {@code stride[i] < 0}
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x);

    /** analogous to {@code begin_mask}
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x);

    /** a bitmask where bit {@code i} being 1 means the {@code i}th
     *  position is actually an ellipsis. One bit at most can be 1.
     *  If {@code ellipsis_mask == 0}, then an implicit ellipsis mask of {@code 1 << (m+1)}
     *  is provided. This means that {@code foo[3:5] == foo[3:5, ...]}. An ellipsis
     *  implicitly creates as many range specifications as necessary to fully
     *  specify the sliced range for every dimension. For example for a 4-dimensional
     *  tensor {@code foo} the slice {@code foo[2, ..., 5:8]} implies {@code foo[2, :, :, 5:8]}.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x);

    /** a bitmask where bit {@code i} being 1 means the {@code i}th
     *  specification creates a new shape 1 dimension. For example
     *  {@code foo[:4, tf.newaxis, :2]} would produce a shape {@code (4, 1, 2)} tensor.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x);

    /** a bitmask where bit {@code i} implies that the {@code i}th
     *  specification should shrink the dimensionality. begin and end
     *  must imply a slice of size 1 in the dimension. For example in
     *  python one might do {@code foo[:, 3, :]} which would result in
     *  {@code shrink_axis_mask} being 2.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long begin_mask_(); public native Attrs begin_mask_(long begin_mask_);
    public native @Cast("tensorflow::int64") long end_mask_(); public native Attrs end_mask_(long end_mask_);
    public native @Cast("tensorflow::int64") long ellipsis_mask_(); public native Attrs ellipsis_mask_(long ellipsis_mask_);
    public native @Cast("tensorflow::int64") long new_axis_mask_(); public native Attrs new_axis_mask_(long new_axis_mask_);
    public native @Cast("tensorflow::int64") long shrink_axis_mask_(); public native Attrs shrink_axis_mask_(long shrink_axis_mask_);
  }
  public StridedSlice(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input begin,
               @ByVal Input end, @ByVal Input strides) { super((Pointer)null); allocate(scope, input, begin, end, strides); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input begin,
               @ByVal Input end, @ByVal Input strides);
  public StridedSlice(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input begin,
               @ByVal Input end, @ByVal Input strides, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, begin, end, strides, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input begin,
               @ByVal Input end, @ByVal Input strides, @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native StridedSlice operation(Operation operation);
  public native @ByRef Output output(); public native StridedSlice output(Output output);
}
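/* Editorial usage sketch for {@code StridedSlice} -- not part of the generated
 * bindings. Builds the equivalent of Python's foo[1:3, ::2] on a 2-D tensor:
 * beginT = [1, 0], endT = [3, 0], stridesT = [1, 2], with end_mask bit 1 set so
 * the second dimension uses its full range. Tensor variables are hypothetical:
 *
 *   Scope scope = Scope.NewRootScope();
 *   StridedSlice sliced = new StridedSlice(scope, new Input(fooT),
 *       new Input(beginT), new Input(endT), new Input(stridesT),
 *       StridedSlice.EndMask(1 << 1));
 *   Output out = sliced.asOutput();
 */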
/** Assign {@code value} to the sliced l-value reference of {@code ref}.
 * 
 *  The values of {@code value} are assigned to the positions in the variable
 *  {@code ref} that are selected by the slice parameters. The slice parameters
 *  {@code begin}, {@code end}, {@code strides}, etc. work exactly as in {@code StridedSlice}.
 * 
 *  NOTE this op currently does not support broadcasting and so {@code value}'s
 *  shape must be exactly the shape produced by the slice of {@code ref}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output_ref tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class StridedSliceAssign extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StridedSliceAssign(Pointer p) { super(p); }

  /** Optional attribute setters for StridedSliceAssign */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to 0 */
    public native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long begin_mask_(); public native Attrs begin_mask_(long begin_mask_);
    public native @Cast("tensorflow::int64") long end_mask_(); public native Attrs end_mask_(long end_mask_);
    public native @Cast("tensorflow::int64") long ellipsis_mask_(); public native Attrs ellipsis_mask_(long ellipsis_mask_);
    public native @Cast("tensorflow::int64") long new_axis_mask_(); public native Attrs new_axis_mask_(long new_axis_mask_);
    public native @Cast("tensorflow::int64") long shrink_axis_mask_(); public native Attrs shrink_axis_mask_(long shrink_axis_mask_);
  }
  public StridedSliceAssign(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input value) { super((Pointer)null); allocate(scope, ref, begin, end, strides, value); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input value);
  public StridedSliceAssign(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, begin, end, strides, value, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input value, @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native StridedSliceAssign operation(Operation operation);
  public native @ByRef Output output_ref(); public native StridedSliceAssign output_ref(Output output_ref);
}

/** Returns the gradient of {@code StridedSlice}.
 * 
 *  Since {@code StridedSlice} cuts out pieces of its {@code input} which is size
 *  {@code shape}, its gradient will have the same shape (which is passed here
 *  as {@code shape}). The gradient will be zero in any element that the slice
 *  does not select.
 * 
 *  Arguments are the same as {@code StridedSlice} with the exception that
 *  {@code dy} is the input gradient to be propagated and {@code shape} is the
 *  shape of {@code StridedSlice}'s {@code input}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class StridedSliceGrad extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StridedSliceGrad(Pointer p) { super(p); }

  /** Optional attribute setters for StridedSliceGrad */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to 0 */
    public native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x);

    /** Defaults to 0 */
    public native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long begin_mask_(); public native Attrs begin_mask_(long begin_mask_);
    public native @Cast("tensorflow::int64") long end_mask_(); public native Attrs end_mask_(long end_mask_);
    public native @Cast("tensorflow::int64") long ellipsis_mask_(); public native Attrs ellipsis_mask_(long ellipsis_mask_);
    public native @Cast("tensorflow::int64") long new_axis_mask_(); public native Attrs new_axis_mask_(long new_axis_mask_);
    public native @Cast("tensorflow::int64") long shrink_axis_mask_(); public native Attrs shrink_axis_mask_(long shrink_axis_mask_);
  }
  public StridedSliceGrad(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input dy) { super((Pointer)null); allocate(scope, shape, begin, end, strides, dy); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input dy);
  public StridedSliceGrad(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input dy, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, begin, end, strides, dy, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input begin,
                 @ByVal Input end, @ByVal Input strides, @ByVal Input dy, @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public static native @ByVal Attrs BeginMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs EndMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs EllipsisMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs NewAxisMask(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs ShrinkAxisMask(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native StridedSliceGrad operation(Operation operation);
  public native @ByRef Output output(); public native StridedSliceGrad output(Output output);
}
/** Constructs a tensor by tiling a given tensor.
 * 
 *  This operation creates a new tensor by replicating {@code input} {@code multiples} times.
 *  The output tensor's i'th dimension has {@code input.dims(i) * multiples[i]} elements,
 *  and the values of {@code input} are replicated {@code multiples[i]} times along the 'i'th
 *  dimension. For example, tiling {@code [a b c d]} by {@code [2]} produces
 *  {@code [a b c d a b c d]}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 1-D or higher.
 *  * multiples: 1-D. Length must be the same as the number of dimensions in {@code input}
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Tile extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Tile(Pointer p) { super(p); }

  public Tile(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input multiples) { super((Pointer)null); allocate(scope, input, multiples); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input multiples);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native Tile operation(Operation operation);
  public native @ByRef Output output(); public native Tile output(Output output);
}

/** Shuffle dimensions of x according to a permutation.
 * 
 *  The output {@code y} has the same rank as {@code x}. The shapes of {@code x} and {@code y} satisfy:
 *    {@code y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]}
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Transpose extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Transpose(Pointer p) { super(p); }

  public Transpose(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input perm) { super((Pointer)null); allocate(scope, x, perm); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input perm);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native Transpose operation(Operation operation);
  public native @ByRef Output y(); public native Transpose y(Output y);
}
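/* Editorial usage sketch for {@code Tile} and {@code Transpose} -- not part of
 * the generated bindings. Tiles a vector twice, then swaps the two axes of a
 * matrix with the permutation [1, 0]; the Tensor variables are hypothetical:
 *
 *   Scope scope = Scope.NewRootScope();
 *   Tile tiled = new Tile(scope, new Input(vecT), new Input(multiplesT));   // multiplesT = [2]
 *   Transpose swapped = new Transpose(scope, new Input(matT), new Input(permT)); // permT = [1, 0]
 *   Output y = swapped.y();
 */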
/** Finds unique elements in a 1-D tensor.
 * 
 *  This operation returns a tensor {@code y} containing all of the unique elements of {@code x}
 *  sorted in the same order that they occur in {@code x}. This operation also returns a
 *  tensor {@code idx} the same size as {@code x} that contains the index of each value of {@code x}
 *  in the unique output {@code y}. In other words:
 * 
 *  {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
 * 
 *  For example:
 * 
{@code
 *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
 *  y, idx = unique(x)
 *  y ==> [1, 2, 4, 7, 8]
 *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: 1-D.
 * 
 *  Returns:
 *  * {@code Output} y: 1-D.
 *  * {@code Output} idx: 1-D. */
@Namespace("tensorflow::ops") @NoOffset public static class Unique extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Unique(Pointer p) { super(p); }

  /** Optional attribute setters for Unique */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to DT_INT32 */
    public native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

    public native @Cast("tensorflow::DataType") int out_idx_(); public native Attrs out_idx_(int out_idx_);
  }
  public Unique(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
  public Unique(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native Unique operation(Operation operation);
  public native @ByRef Output y(); public native Unique y(Output y);
  public native @ByRef Output idx(); public native Unique idx(Output idx);
}
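/* Editorial usage sketch for {@code Unique} -- not part of the generated
 * bindings. The op has two outputs, exposed as the named accessors y() and
 * idx() rather than a single output(); {@code xT} is a hypothetical Tensor:
 *
 *   Scope scope = Scope.NewRootScope();
 *   Unique uniq = new Unique(scope, new Input(xT)); // xT = [1, 1, 2, 4, 4, 4, 7, 8, 8]
 *   Output y = uniq.y();     // [1, 2, 4, 7, 8]
 *   Output idx = uniq.idx(); // [0, 0, 1, 2, 2, 2, 3, 4, 4]
 */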
{@code
 * 
 *  For an `2-D` tensor `x` with `axis = 0`:
 * 
 *  }
* # tensor 'x' is [[1, 0, 0], * # [1, 0, 0], * # [2, 0, 0]] * y, idx = unique(x, axis=0) * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] *
{@code
 * 
 *  For an `2-D` tensor `x` with `axis = 1`:
 * 
 *  }
* # tensor 'x' is [[1, 0, 0], * # [1, 0, 0], * # [2, 0, 0]] * y, idx = unique(x, axis=1) * y ==> [[1, 0], * [1, 0], * [2, 0]] * idx ==> [0, 1, 1] *
{@code
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: A `Tensor`.
 *  * axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
 *  find the unique elements.
 * 
 *  Returns:
 *  * `Output` y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
 *  * `Output` idx: A 1-D Tensor. Has the same type as x that contains the index of each
 *  value of x in the output y. */
@Namespace("tensorflow::ops") @NoOffset public static class UniqueV2 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UniqueV2(Pointer p) { super(p); }

  /** Optional attribute setters for UniqueV2 */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to DT_INT32 */
    public native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

    public native @Cast("tensorflow::DataType") int out_idx_(); public native Attrs out_idx_(int out_idx_);
  }
  public UniqueV2(@Const @ByRef Scope scope, @ByVal Input x,
           @ByVal Input axis) { super((Pointer)null); allocate(scope, x, axis); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x,
           @ByVal Input axis);
  public UniqueV2(@Const @ByRef Scope scope, @ByVal Input x,
           @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, axis, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x,
           @ByVal Input axis, @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native UniqueV2 operation(Operation operation);
  public native @ByRef Output y(); public native UniqueV2 y(Output y);
  public native @ByRef Output idx(); public native UniqueV2 idx(Output idx);
}
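
/* Hedged usage sketch (illustrative, not part of the generated bindings):
 * building a small graph that applies UniqueV2 along an axis. The Placeholder
 * class, the DT_* constants, and Scope.NewRootScope() are assumed to be
 * available from elsewhere in these bindings.
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder x = new Placeholder(scope, DT_INT32);     // e.g. a 2-D tensor
 *   Placeholder axis = new Placeholder(scope, DT_INT32);  // scalar axis
 *   UniqueV2 unique = new UniqueV2(scope, x.asInput(), axis.asInput(),
 *                                  UniqueV2.OutIdx(DT_INT64));  // optional attr
 *   Output y = unique.y();      // unique slices along the axis
 *   Output idx = unique.idx();  // index of each slice of x in y
 */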

/** Finds unique elements in a 1-D tensor.
 * 
 *  This operation returns a tensor {@code y} containing all of the unique elements of {@code x}
 *  sorted in the same order that they occur in {@code x}. This operation also returns a
 *  tensor {@code idx} the same size as {@code x} that contains the index of each value of {@code x}
 *  in the unique output {@code y}. Finally, it returns a third tensor {@code count} that
 *  contains the count of each element of {@code y} in {@code x}. In other words:
 * 
 *  {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
 * 
 *  For example:
 * 
 *  
{@code
 *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
 *  y, idx, count = unique_with_counts(x)
 *  y ==> [1, 2, 4, 7, 8]
 *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
 *  count ==> [2, 1, 3, 1, 2]
 *  }
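 * 
 *  A hedged usage sketch of these Java bindings (illustrative only; Placeholder,
 *  DT_INT32, and Scope.NewRootScope() are assumed from elsewhere in this file):
 * 
 *  
{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder x = new Placeholder(scope, DT_INT32);   // 1-D input tensor
 *  UniqueWithCounts uwc = new UniqueWithCounts(scope, x.asInput());
 *  Output y = uwc.y();          // the unique values of x
 *  Output idx = uwc.idx();      // index of each element of x in y
 *  Output count = uwc.count();  // occurrence count of each element of y
 *  }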
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: 1-D.
 * 
 *  Returns:
 *  * {@code Output} y: 1-D.
 *  * {@code Output} idx: 1-D.
 *  * {@code Output} count: 1-D. */
@Namespace("tensorflow::ops") @NoOffset public static class UniqueWithCounts extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UniqueWithCounts(Pointer p) { super(p); }

  /** Optional attribute setters for UniqueWithCounts */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to DT_INT32 */
    public native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

    public native @Cast("tensorflow::DataType") int out_idx_(); public native Attrs out_idx_(int out_idx_);
  }
  public UniqueWithCounts(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
  public UniqueWithCounts(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native UniqueWithCounts operation(Operation operation);
  public native @ByRef Output y(); public native UniqueWithCounts y(Output y);
  public native @ByRef Output idx(); public native UniqueWithCounts idx(Output idx);
  public native @ByRef Output count(); public native UniqueWithCounts count(Output count);
}

/** Finds unique elements along an axis of a tensor.
 * 
 *  This operation returns a tensor {@code y} containing the unique elements
 *  along the {@code axis} of a tensor. The returned unique elements are sorted
 *  in the same order as they occur along {@code axis} in {@code x}.
 *  This operation also returns a tensor {@code idx} and a tensor {@code count}
 *  that are the same size as the number of elements in {@code x} along the
 *  {@code axis} dimension. The {@code idx} contains the index in the unique output {@code y},
 *  and the {@code count} contains the number of occurrences of each element of {@code y} in {@code x}.
 *  In other words, for a {@code 1-D} tensor {@code x} with {@code axis = None}:
 * 
 *  {@code y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]}
 * 
 *  For example:
 * 
 *  
{@code
 *  # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
 *  y, idx, count = unique_with_counts(x)
 *  y ==> [1, 2, 4, 7, 8]
 *  idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
 *  count ==> [2, 1, 3, 1, 2]
 *  }
 * 
 *  For a {@code 2-D} tensor {@code x} with {@code axis = 0}:
 * 
 *  
{@code
 *  # tensor 'x' is [[1, 0, 0],
 *  #                [1, 0, 0],
 *  #                [2, 0, 0]]
 *  y, idx, count = unique_with_counts(x, axis=0)
 *  y ==> [[1, 0, 0],
 *         [2, 0, 0]]
 *  idx ==> [0, 0, 1]
 *  count ==> [2, 1]
 *  }
 * 
 *  For a {@code 2-D} tensor {@code x} with {@code axis = 1}:
 * 
 *  
{@code
 *  # tensor 'x' is [[1, 0, 0],
 *  #                [1, 0, 0],
 *  #                [2, 0, 0]]
 *  y, idx, count = unique_with_counts(x, axis=1)
 *  y ==> [[1, 0],
 *         [1, 0],
 *         [2, 0]]
 *  idx ==> [0, 1, 1]
 *  count ==> [1, 2]
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: A {@code Tensor}.
 *  * axis: A {@code Tensor} of type {@code int32} (default: None). The axis of the Tensor to
 *  find the unique elements.
 * 
 *  Returns:
 *  * {@code Output} y: A {@code Tensor}. Unique elements along the {@code axis} of {@code Tensor} x.
 *  * {@code Output} idx: A 1-D Tensor. Has the same type as x that contains the index of each
 *  value of x in the output y.
 *  * {@code Output} count: A 1-D Tensor. The count of each value of x in the output y. */
@Namespace("tensorflow::ops") @NoOffset public static class UniqueWithCountsV2 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UniqueWithCountsV2(Pointer p) { super(p); }

  /** Optional attribute setters for UniqueWithCountsV2 */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Defaults to DT_INT32 */
    public native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

    public native @Cast("tensorflow::DataType") int out_idx_(); public native Attrs out_idx_(int out_idx_);
  }
  public UniqueWithCountsV2(@Const @ByRef Scope scope, @ByVal Input x,
                     @ByVal Input axis) { super((Pointer)null); allocate(scope, x, axis); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x,
                     @ByVal Input axis);
  public UniqueWithCountsV2(@Const @ByRef Scope scope, @ByVal Input x,
                     @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, axis, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x,
                     @ByVal Input axis, @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs OutIdx(@Cast("tensorflow::DataType") int x);

  public native @ByRef Operation operation(); public native UniqueWithCountsV2 operation(Operation operation);
  public native @ByRef Output y(); public native UniqueWithCountsV2 y(Output y);
  public native @ByRef Output idx(); public native UniqueWithCountsV2 idx(Output idx);
  public native @ByRef Output count(); public native UniqueWithCountsV2 count(Output count);
}
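
/* Hedged usage sketch (illustrative, not part of the generated bindings):
 * UniqueWithCountsV2 follows the same pattern as UniqueV2 but also exposes
 * per-element counts. Placeholder, DT_INT32, and Scope.NewRootScope() are
 * assumed from elsewhere in these bindings.
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder x = new Placeholder(scope, DT_INT32);
 *   Placeholder axis = new Placeholder(scope, DT_INT32);
 *   UniqueWithCountsV2 uwc = new UniqueWithCountsV2(scope, x.asInput(), axis.asInput());
 *   Output y = uwc.y();          // unique slices along the axis
 *   Output idx = uwc.idx();      // index of each slice of x in y
 *   Output count = uwc.count();  // occurrence count of each slice of y in x
 */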

/** Unpacks a given dimension of a rank-{@code R} tensor into {@code num} rank-{@code (R-1)} tensors.
 * 
 *  Unpacks {@code num} tensors from {@code value} by chipping it along the {@code axis} dimension.
 *  For example, given a tensor of shape {@code (A, B, C, D)}:
 * 
 *  If {@code axis == 0} then the i'th tensor in {@code output} is the slice {@code value[i, :, :, :]}
 *    and each tensor in {@code output} will have shape {@code (B, C, D)}. (Note that the
 *    dimension unpacked along is gone, unlike {@code split}).
 * 
 *  If {@code axis == 1} then the i'th tensor in {@code output} is the slice {@code value[:, i, :, :]}
 *    and each tensor in {@code output} will have shape {@code (A, C, D)}.
 *  Etc.
 * 
 *  This is the opposite of {@code pack}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * value: 1-D or higher, with {@code axis} dimension size equal to {@code num}.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * axis: Dimension along which to unpack.  Negative values wrap around, so the
 *  valid range is {@code [-R, R)}.
 * 
 *  Returns:
 *  * {@code OutputList}: The list of tensors unpacked from {@code value}. */
@Namespace("tensorflow::ops") @NoOffset public static class Unstack extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Unstack(Pointer p) { super(p); }

  /** Optional attribute setters for Unstack */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Dimension along which to unpack.  Negative values wrap around, so the
     *  valid range is {@code [-R, R)}.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Axis(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long axis_(); public native Attrs axis_(long axis_);
  }
  public Unstack(@Const @ByRef Scope scope, @ByVal Input value, @Cast("tensorflow::int64") long num) { super((Pointer)null); allocate(scope, value, num); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @Cast("tensorflow::int64") long num);
  public Unstack(@Const @ByRef Scope scope, @ByVal Input value, @Cast("tensorflow::int64") long num,
          @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, num, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @Cast("tensorflow::int64") long num,
          @Const @ByRef Attrs attrs);
  public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);


  public static native @ByVal Attrs Axis(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native Unstack operation(Operation operation);
  public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output(); public native Unstack output(OutputVector output);
}
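
/* Hedged usage sketch (illustrative, not part of the generated bindings):
 * unpacking a rank-2 tensor of shape (3, D) into 3 rank-1 tensors along axis 0.
 * Placeholder, DT_FLOAT, and Scope.NewRootScope() are assumed from these
 * bindings; num must equal the size of the unpacked dimension.
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder value = new Placeholder(scope, DT_FLOAT);      // shape (3, D)
 *   Unstack parts = new Unstack(scope, value.asInput(), 3,
 *                               new Unstack.Attrs().Axis(0));  // optional attr
 *   Output first = parts.get(0);  // operator[] over the produced OutputList
 */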

/** Converts a flat index or array of flat indices into a tuple of
 *  coordinate arrays.
 * 
 *  \compatibility(numpy)
 *  Equivalent to np.unravel_index
 *  \end_compatibility
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 0-D or 1-D {@code int} Tensor whose elements are indices into the
 *  flattened version of an array of dimensions dims.
 *  * dims: A 1-D {@code int} Tensor. The shape of the array to use for unraveling
 *  indices.
 * 
 *  Returns:
 *  * {@code Output}: A 2-D (or 1-D if indices is 0-D) tensor where each row has the
 *  same shape as the indices array. */
@Namespace("tensorflow::ops") @NoOffset public static class UnravelIndex extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnravelIndex(Pointer p) { super(p); }

  public UnravelIndex(@Const @ByRef Scope scope, @ByVal Input indices,
               @ByVal Input dims) { super((Pointer)null); allocate(scope, indices, dims); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices,
               @ByVal Input dims);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native UnravelIndex operation(Operation operation);
  public native @ByRef Output output(); public native UnravelIndex output(Output output);
}
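
/* Hedged usage sketch (illustrative, not part of the generated bindings):
 * converting flat indices into coordinates for a (3, 4) array, so a flat
 * index of 7 maps to the coordinate (1, 3). Placeholder, DT_INT32, and
 * Scope.NewRootScope() are assumed from these bindings.
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder indices = new Placeholder(scope, DT_INT32);  // e.g. [2, 5, 7]
 *   Placeholder dims = new Placeholder(scope, DT_INT32);     // e.g. [3, 4]
 *   UnravelIndex coords = new UnravelIndex(scope, indices.asInput(), dims.asInput());
 *   Output out = coords.output();  // shape (2, 3): one row per dimension
 */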

/** Returns locations of nonzero / true values in a tensor.
 * 
 *  This operation returns the coordinates of true elements in {@code condition}. The
 *  coordinates are returned in a 2-D tensor where the first dimension (rows)
 *  represents the number of true elements, and the second dimension (columns)
 *  represents the coordinates of the true elements. Keep in mind that the shape of
 *  the output tensor can vary depending on how many true values there are in
 *  {@code condition}. Indices are output in row-major order.
 * 
 *  For example:
 * 
 *  
{@code
 *  # 'input' tensor is [[True, False]
 *  #                    [True, False]]
 *  # 'input' has two true values, so output has two coordinates.
 *  # 'input' has rank of 2, so coordinates have two indices.
 *  where(input) ==> [[0, 0],
 *                    [1, 0]]
 * 
 *  # `condition` tensor is [[[True, False]
 *  #                     [True, False]]
 *  #                    [[False, True]
 *  #                     [False, True]]
 *  #                    [[False, False]
 *  #                     [False, True]]]
 *  # 'input' has 5 true values, so output has 5 coordinates.
 *  # 'input' has rank of 3, so coordinates have three indices.
 *  where(input) ==> [[0, 0, 0],
 *                    [0, 1, 0],
 *                    [1, 0, 1],
 *                    [1, 1, 1],
 *                    [2, 1, 1]]
 * 
 *  # `condition` tensor is [[[1.5,  0.0]
 *  #                     [-0.5, 0.0]]
 *  #                    [[0.0,  0.25]
 *  #                     [0.0,  0.75]]
 *  #                    [[0.0,  0.0]
 *  #                     [0.0,  0.01]]]
 *  # 'input' has 5 nonzero values, so output has 5 coordinates.
 *  # 'input' has rank of 3, so coordinates have three indices.
 *  where(input) ==> [[0, 0, 0],
 *                    [0, 1, 0],
 *                    [1, 0, 1],
 *                    [1, 1, 1],
 *                    [2, 1, 1]]
 * 
 *  # `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
 *  #                     [0.0 + 0.5j, 0.0  + 0.0j]]
 *  #                    [[0.0 + 0.0j, 0.25 + 1.5j]
 *  #                     [0.0 + 0.0j, 0.75 + 0.0j]]
 *  #                    [[0.0 + 0.0j, 0.0  + 0.0j]
 *  #                     [0.0 + 0.0j, 0.01 + 0.0j]]]
 *  # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
 *  # 'input' has rank of 3, so coordinates have three indices.
 *  where(input) ==> [[0, 0, 0],
 *                    [0, 1, 0],
 *                    [1, 0, 1],
 *                    [1, 1, 1],
 *                    [2, 1, 1]]
 *  }
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The index tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Where extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Where(Pointer p) { super(p); }

  public Where(@Const @ByRef Scope scope, @ByVal Input condition) { super((Pointer)null); allocate(scope, condition); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input condition);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native Where operation(Operation operation);
  public native @ByRef Output index(); public native Where index(Output index);
}

/** Returns a tensor of zeros with the same shape and type as x.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: a tensor of type T.
 * 
 *  Returns:
 *  * {@code Output}: a tensor of the same shape and type as x but filled with zeros. */
@Namespace("tensorflow::ops") @NoOffset public static class ZerosLike extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ZerosLike(Pointer p) { super(p); }

  public ZerosLike(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
  public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
  public native @ByVal @Name("operator tensorflow::Input") Input asInput();
  public native Node node();

  public native @ByRef Operation operation(); public native ZerosLike operation(Operation operation);
  public native @ByRef Output y(); public native ZerosLike y(Output y);
}
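
/* Hedged usage sketch (illustrative, not part of the generated bindings):
 * locating the true entries of a boolean tensor and building a matching zeros
 * tensor. Placeholder, DT_BOOL/DT_FLOAT, and Scope.NewRootScope() are assumed
 * from these bindings.
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder condition = new Placeholder(scope, DT_BOOL);
 *   Where where = new Where(scope, condition.asInput());
 *   Output coords = where.index();  // (num_true, rank) coordinate rows
 *
 *   Placeholder x = new Placeholder(scope, DT_FLOAT);
 *   ZerosLike zeros = new ZerosLike(scope, x.asInput());
 *   Output zeroOut = zeros.y();     // same shape and type as x
 */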

/** \} */

// namespace ops
// namespace tensorflow

// #endif  // TENSORFLOW_CC_OPS_ARRAY_OPS_H_

// Parsed from tensorflow/cc/ops/candidate_sampling_ops.h

// This file is MACHINE GENERATED! Do not edit.

// #ifndef TENSORFLOW_CC_OPS_CANDIDATE_SAMPLING_OPS_H_
// #define TENSORFLOW_CC_OPS_CANDIDATE_SAMPLING_OPS_H_

// This file is MACHINE GENERATED! Do not edit.

// #include "tensorflow/cc/framework/ops.h"
// #include "tensorflow/cc/framework/scope.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/framework/tensor_shape.h"
// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"

/** \defgroup candidate_sampling_ops Candidate Sampling Ops
 *  \{ */

/** Generates labels for candidate sampling with a learned unigram distribution.
 * 
 *  See explanations of candidate sampling and the data formats at
 *  go/candidate-sampling.
 * 
 *  For each batch, this op picks a single set of sampled candidate labels.
 * 
 *  The advantages of sampling candidates per-batch are simplicity and the
 *  possibility of efficient dense matrix multiplication. The disadvantage is that
 *  the sampled candidates must be chosen independently of the context and of the
 *  true labels.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * true_classes: A batch_size * num_true matrix, in which each row contains the
 *  IDs of the num_true target_classes in the corresponding original label.
 *  * num_true: Number of true labels per context.
 *  * num_sampled: Number of candidates to produce.
 *  * unique: If unique is true, we sample with rejection, so that all sampled
 *  candidates in a batch are unique. This requires some approximation to
 *  estimate the post-rejection sampling probabilities.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * seed: If either seed or seed2 are set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, it is seeded by a
 *  random seed.
 *  * seed2: A second seed to avoid seed collision.
 * 
 *  Returns:
 *  * {@code Output} sampled_candidates: A vector of length num_sampled, in which each element is
 *  the ID of a sampled candidate.
 *  * {@code Output} true_expected_count: A batch_size * num_true matrix, representing
 *  the number of times each candidate is expected to occur in a batch
 *  of sampled candidates. If unique=true, then this is a probability.
 *  * {@code Output} sampled_expected_count: A vector of length num_sampled, for each sampled
 *  candidate representing the number of times the candidate is expected
 *  to occur in a batch of sampled candidates. If unique=true, then this is a
 *  probability. */
@Namespace("tensorflow::ops") @NoOffset public static class AllCandidateSampler extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AllCandidateSampler(Pointer p) { super(p); }

  /** Optional attribute setters for AllCandidateSampler */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** If either seed or seed2 are set to be non-zero, the random number
     *  generator is seeded by the given seed. Otherwise, it is seeded by a
     *  random seed.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

    /** A second seed to avoid seed collision.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_);
    public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_);
  }
  public AllCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                      @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                      @Cast("bool") boolean unique) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                      @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                      @Cast("bool") boolean unique);
  public AllCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                      @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                      @Cast("bool") boolean unique, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                      @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                      @Cast("bool") boolean unique, @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native AllCandidateSampler operation(Operation operation);
  public native @ByRef Output sampled_candidates(); public native AllCandidateSampler sampled_candidates(Output sampled_candidates);
  public native @ByRef Output true_expected_count(); public native AllCandidateSampler true_expected_count(Output true_expected_count);
  public native @ByRef Output sampled_expected_count(); public native AllCandidateSampler sampled_expected_count(Output sampled_expected_count);
}

/** Computes the ids of the positions in sampled_candidates that match true_labels.
 * 
 *  When doing log-odds NCE, the result of this op should be passed through a
 *  SparseToDense op, then added to the logits of the sampled candidates. This has
 *  the effect of 'removing' the sampled labels that match the true labels by
 *  making the classifier sure that they are sampled labels.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * true_classes: The true_classes output of UnpackSparseLabels.
 *  * sampled_candidates: The sampled_candidates output of CandidateSampler.
 *  * num_true: Number of true labels per context.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * seed: If either seed or seed2 are set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, it is seeded by a
 *  random seed.
 *  * seed2: A second seed to avoid seed collision.
 * 
 *  Returns:
 *  * {@code Output} indices: A vector of indices corresponding to rows of true_candidates.
 *  * {@code Output} ids: A vector of IDs of positions in sampled_candidates that match a true_label
 *  for the row with the corresponding index in indices.
 *  * {@code Output} weights: A vector of the same length as indices and ids, in which each element
 *  is -FLOAT_MAX. */
@Namespace("tensorflow::ops") @NoOffset public static class ComputeAccidentalHits extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ComputeAccidentalHits(Pointer p) { super(p); }

  /** Optional attribute setters for ComputeAccidentalHits */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** If either seed or seed2 are set to be non-zero, the random number
     *  generator is seeded by the given seed. Otherwise, it is seeded by a
     *  random seed.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

    /** A second seed to avoid seed collision.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_);
    public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_);
  }
  public ComputeAccidentalHits(@Const @ByRef Scope scope, @ByVal Input true_classes,
                        @ByVal Input sampled_candidates, @Cast("tensorflow::int64") long num_true) { super((Pointer)null); allocate(scope, true_classes, sampled_candidates, num_true); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                        @ByVal Input sampled_candidates, @Cast("tensorflow::int64") long num_true);
  public ComputeAccidentalHits(@Const @ByRef Scope scope, @ByVal Input true_classes,
                        @ByVal Input sampled_candidates, @Cast("tensorflow::int64") long num_true,
                        @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, true_classes, sampled_candidates, num_true, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                        @ByVal Input sampled_candidates, @Cast("tensorflow::int64") long num_true,
                        @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native ComputeAccidentalHits operation(Operation operation);
  public native @ByRef Output indices(); public native ComputeAccidentalHits indices(Output indices);
  public native @ByRef Output ids(); public native ComputeAccidentalHits ids(Output ids);
  public native @ByRef Output weights(); public native ComputeAccidentalHits weights(Output weights);
}

/** Generates labels for candidate sampling with a learned unigram distribution.
 * 
 *  A unigram sampler could use a fixed unigram distribution read from a
 *  file or passed in as an in-memory array instead of building up the distribution
 *  from data on the fly. There is also an option to skew the distribution by
 *  applying a distortion power to the weights.
 * 
 *  The vocabulary file should be in CSV-like format, with the last field
 *  being the weight associated with the word.
 * 
 *  For each batch, this op picks a single set of sampled candidate labels.
 * 
 *  The advantages of sampling candidates per-batch are simplicity and the
 *  possibility of efficient dense matrix multiplication. The disadvantage is that
 *  the sampled candidates must be chosen independently of the context and of the
 *  true labels.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * true_classes: A batch_size * num_true matrix, in which each row contains the
 *  IDs of the num_true target_classes in the corresponding original label.
 *  * num_true: Number of true labels per context.
 *  * num_sampled: Number of candidates to randomly sample.
 *  * unique: If unique is true, we sample with rejection, so that all sampled
 *  candidates in a batch are unique. This requires some approximation to
 *  estimate the post-rejection sampling probabilities.
 *  * range_max: The sampler will sample integers from the interval [0, range_max).
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * vocab_file: Each valid line in this file (which should have a CSV-like format)
 *  corresponds to a valid word ID. IDs are in sequential order, starting from
 *  num_reserved_ids. The last entry in each line is expected to be a value
 *  corresponding to the count or relative probability. Exactly one of vocab_file
 *  and unigrams needs to be passed to this op.
 *  * distortion: The distortion is used to skew the unigram probability distribution.
 *  Each weight is first raised to the distortion's power before adding to the
 *  internal unigram distribution. As a result, distortion = 1.0 gives regular
 *  unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
 *  a uniform distribution.
 *  * num_reserved_ids: Optionally some reserved IDs can be added in the range [0,
 *  ..., num_reserved_ids) by the users. One use case is that a special unknown
 *  word token is used as ID 0. These IDs will have a sampling probability of 0.
 *  * num_shards: A sampler can be used to sample from a subset of the original range
 *  in order to speed up the whole computation through parallelism. This parameter
 *  (together with 'shard') indicates the number of partitions that are being
 *  used in the overall computation.
 *  * shard: A sampler can be used to sample from a subset of the original range
 *  in order to speed up the whole computation through parallelism. This parameter
 *  (together with 'num_shards') indicates the particular partition number of a
 *  sampler op, when partitioning is being used.
 *  * unigrams: A list of unigram counts or probabilities, one per ID in sequential
 *  order. Exactly one of vocab_file and unigrams should be passed to this op.
 *  * seed: If either seed or seed2 are set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, it is seeded by a
 *  random seed.
 *  * seed2: A second seed to avoid seed collision.
 * 
 *  Returns:
 *  * {@code Output} sampled_candidates: A vector of length num_sampled, in which each element is
 *  the ID of a sampled candidate.
 *  * {@code Output} true_expected_count: A batch_size * num_true matrix, representing
 *  the number of times each candidate is expected to occur in a batch
 *  of sampled candidates. If unique=true, then this is a probability.
 *  * {@code Output} sampled_expected_count: A vector of length num_sampled, for each sampled
 *  candidate representing the number of times the candidate is expected
 *  to occur in a batch of sampled candidates. If unique=true, then this is a
 *  probability. */
@Namespace("tensorflow::ops") @NoOffset public static class FixedUnigramCandidateSampler extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FixedUnigramCandidateSampler(Pointer p) { super(p); }

  /** Optional attribute setters for FixedUnigramCandidateSampler */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** Each valid line in this file (which should have a CSV-like format)
     *  corresponds to a valid word ID. IDs are in sequential order, starting from
     *  num_reserved_ids. The last entry in each line is expected to be a value
     *  corresponding to the count or relative probability. Exactly one of vocab_file
     *  and unigrams needs to be passed to this op.
     * 
     *  Defaults to "" */
    ///
    public native @ByVal Attrs VocabFile(@StringPiece BytePointer x);
    public native @ByVal Attrs VocabFile(@StringPiece String x);

    /** The distortion is used to skew the unigram probability distribution.
     *  Each weight is first raised to the distortion's power before adding to the
     *  internal unigram distribution. As a result, distortion = 1.0 gives regular
     *  unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
     *  a uniform distribution.
     * 
     *  Defaults to 1 */
    ///
    public native @ByVal Attrs Distortion(float x);

    /** Optionally some reserved IDs can be added in the range [0,
     *  ..., num_reserved_ids) by the users. One use case is that a special unknown
     *  word token is used as ID 0. These IDs will have a sampling probability of 0.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs NumReservedIds(@Cast("tensorflow::int64") long x);

    /** A sampler can be used to sample from a subset of the original range
     *  in order to speed up the whole computation through parallelism. This parameter
     *  (together with 'shard') indicates the number of partitions that are being
     *  used in the overall computation.
     * 
     *  Defaults to 1 */
    ///
    public native @ByVal Attrs NumShards(@Cast("tensorflow::int64") long x);

    /** A sampler can be used to sample from a subset of the original range
     *  in order to speed up the whole computation through parallelism. This parameter
     *  (together with 'num_shards') indicates the particular partition number of a
     *  sampler op, when partitioning is being used.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Shard(@Cast("tensorflow::int64") long x);

    /** A list of unigram counts or probabilities, one per ID in sequential
     *  order. Exactly one of vocab_file and unigrams should be passed to this op.
     * 
     *  Defaults to [] */
    ///
    public native @ByVal Attrs Unigrams(@ArraySlice FloatPointer x);
    public native @ByVal Attrs Unigrams(@ArraySlice FloatBuffer x);
    public native @ByVal Attrs Unigrams(@ArraySlice float... x);

    /** If either seed or seed2 are set to be non-zero, the random number
     *  generator is seeded by the given seed. Otherwise, it is seeded by a
     *  random seed.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

    /** A second seed to avoid seed collision.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

    public native @StringPiece BytePointer vocab_file_(); public native Attrs vocab_file_(BytePointer vocab_file_);
    public native float distortion_(); public native Attrs distortion_(float distortion_);
    public native @Cast("tensorflow::int64") long num_reserved_ids_(); public native Attrs num_reserved_ids_(long num_reserved_ids_);
    public native @Cast("tensorflow::int64") long num_shards_(); public native Attrs num_shards_(long num_shards_);
    public native @Cast("tensorflow::int64") long shard_(); public native Attrs shard_(long shard_);
    public native @ArraySlice FloatPointer unigrams_(); public native Attrs unigrams_(FloatPointer unigrams_);
    public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_);
    public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_);
  }
  public FixedUnigramCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                               @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                               @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                               @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                               @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max);
  public FixedUnigramCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                               @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                               @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                               @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                               @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                               @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                               @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs VocabFile(@StringPiece BytePointer x);
  public static native @ByVal Attrs VocabFile(@StringPiece String x);
  public static native @ByVal Attrs Distortion(float x);
  public static native @ByVal Attrs NumReservedIds(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs NumShards(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Shard(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Unigrams(@ArraySlice FloatPointer x);
  public static native @ByVal Attrs Unigrams(@ArraySlice FloatBuffer x);
  public static native @ByVal Attrs Unigrams(@ArraySlice float... x);
  public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native FixedUnigramCandidateSampler operation(Operation operation);
  public native @ByRef Output sampled_candidates(); public native FixedUnigramCandidateSampler sampled_candidates(Output sampled_candidates);
  public native @ByRef Output true_expected_count(); public native FixedUnigramCandidateSampler true_expected_count(Output true_expected_count);
  public native @ByRef Output sampled_expected_count(); public native FixedUnigramCandidateSampler sampled_expected_count(Output sampled_expected_count);
}

/** Generates labels for candidate sampling with a learned unigram distribution.
 * 
 *  See explanations of candidate sampling and the data formats at
 *  go/candidate-sampling.
 * 
 *  For each batch, this op picks a single set of sampled candidate labels.
 * 
 *  The advantages of sampling candidates per-batch are simplicity and the
 *  possibility of efficient dense matrix multiplication. The disadvantage is that
 *  the sampled candidates must be chosen independently of the context and of the
 *  true labels.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * true_classes: A batch_size * num_true matrix, in which each row contains the
 *  IDs of the num_true target_classes in the corresponding original label.
 *  * num_true: Number of true labels per context.
 *  * num_sampled: Number of candidates to randomly sample.
 *  * unique: If unique is true, we sample with rejection, so that all sampled
 *  candidates in a batch are unique. This requires some approximation to
 *  estimate the post-rejection sampling probabilities.
 *  * range_max: The sampler will sample integers from the interval [0, range_max).
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * seed: If either seed or seed2 are set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, it is seeded by a
 *  random seed.
 *  * seed2: A second seed to avoid seed collision.
 * 
 *  Returns:
 *  * {@code Output} sampled_candidates: A vector of length num_sampled, in which each element is
 *  the ID of a sampled candidate.
 *  * {@code Output} true_expected_count: A batch_size * num_true matrix, representing
 *  the number of times each candidate is expected to occur in a batch
 *  of sampled candidates. If unique=true, then this is a probability.
 *  * {@code Output} sampled_expected_count: A vector of length num_sampled, for each sampled
 *  candidate representing the number of times the candidate is expected
 *  to occur in a batch of sampled candidates. If unique=true, then this is a
 *  probability. */
@Namespace("tensorflow::ops") @NoOffset public static class LearnedUnigramCandidateSampler extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LearnedUnigramCandidateSampler(Pointer p) { super(p); }

  /** Optional attribute setters for LearnedUnigramCandidateSampler */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** If either seed or seed2 are set to be non-zero, the random number
     *  generator is seeded by the given seed. Otherwise, it is seeded by a
     *  random seed.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

    /** A second seed to avoid seed collision.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_);
    public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_);
  }
  public LearnedUnigramCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                                 @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                                 @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                                 @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                                 @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max);
  public LearnedUnigramCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                                 @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                                 @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                                 @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                                 @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                                 @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                                 @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native LearnedUnigramCandidateSampler operation(Operation operation);
  public native @ByRef Output sampled_candidates(); public native LearnedUnigramCandidateSampler sampled_candidates(Output sampled_candidates);
  public native @ByRef Output true_expected_count(); public native LearnedUnigramCandidateSampler true_expected_count(Output true_expected_count);
  public native @ByRef Output sampled_expected_count(); public native LearnedUnigramCandidateSampler sampled_expected_count(Output sampled_expected_count);
}

/** Generates labels for candidate sampling with a log-uniform distribution.
 * 
 *  See explanations of candidate sampling and the data formats at
 *  go/candidate-sampling.
 * 
 *  For each batch, this op picks a single set of sampled candidate labels.
 * 
 *  The advantages of sampling candidates per-batch are simplicity and the
 *  possibility of efficient dense matrix multiplication. The disadvantage is that
 *  the sampled candidates must be chosen independently of the context and of the
 *  true labels.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * true_classes: A batch_size * num_true matrix, in which each row contains the
 *  IDs of the num_true target_classes in the corresponding original label.
 *  * num_true: Number of true labels per context.
 *  * num_sampled: Number of candidates to randomly sample.
 *  * unique: If unique is true, we sample with rejection, so that all sampled
 *  candidates in a batch are unique. This requires some approximation to
 *  estimate the post-rejection sampling probabilities.
 *  * range_max: The sampler will sample integers from the interval [0, range_max).
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * seed: If either seed or seed2 are set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, it is seeded by a
 *  random seed.
 *  * seed2: A second seed to avoid seed collision.
 * 
 *  Returns:
 *  * {@code Output} sampled_candidates: A vector of length num_sampled, in which each element is
 *  the ID of a sampled candidate.
 *  * {@code Output} true_expected_count: A batch_size * num_true matrix, representing
 *  the number of times each candidate is expected to occur in a batch
 *  of sampled candidates. If unique=true, then this is a probability.
 *  * {@code Output} sampled_expected_count: A vector of length num_sampled, for each sampled
 *  candidate representing the number of times the candidate is expected
 *  to occur in a batch of sampled candidates. If unique=true, then this is a
 *  probability. */
@Namespace("tensorflow::ops") @NoOffset public static class LogUniformCandidateSampler extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LogUniformCandidateSampler(Pointer p) { super(p); }

  /** Optional attribute setters for LogUniformCandidateSampler */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** If either seed or seed2 are set to be non-zero, the random number
     *  generator is seeded by the given seed. Otherwise, it is seeded by a
     *  random seed.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

    /** A second seed to avoid seed collision.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_);
    public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_);
  }
  public LogUniformCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                             @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                             @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                             @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                             @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max);
  public LogUniformCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                             @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                             @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                             @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                             @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                             @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                             @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native LogUniformCandidateSampler operation(Operation operation);
  public native @ByRef Output sampled_candidates(); public native LogUniformCandidateSampler sampled_candidates(Output sampled_candidates);
  public native @ByRef Output true_expected_count(); public native LogUniformCandidateSampler true_expected_count(Output true_expected_count);
  public native @ByRef Output sampled_expected_count(); public native LogUniformCandidateSampler sampled_expected_count(Output sampled_expected_count);
}

/** Generates labels for candidate sampling with a uniform distribution.
 * 
 *  See explanations of candidate sampling and the data formats at
 *  go/candidate-sampling.
 * 
 *  For each batch, this op picks a single set of sampled candidate labels.
 * 
 *  The advantages of sampling candidates per-batch are simplicity and the
 *  possibility of efficient dense matrix multiplication. The disadvantage is that
 *  the sampled candidates must be chosen independently of the context and of the
 *  true labels.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * true_classes: A batch_size * num_true matrix, in which each row contains the
 *  IDs of the num_true target_classes in the corresponding original label.
 *  * num_true: Number of true labels per context.
 *  * num_sampled: Number of candidates to randomly sample.
 *  * unique: If unique is true, we sample with rejection, so that all sampled
 *  candidates in a batch are unique. This requires some approximation to
 *  estimate the post-rejection sampling probabilities.
 *  * range_max: The sampler will sample integers from the interval [0, range_max).
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * seed: If either seed or seed2 are set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, it is seeded by a
 *  random seed.
 *  * seed2: A second seed to avoid seed collision.
 * 
 *  Returns:
 *  * {@code Output} sampled_candidates: A vector of length num_sampled, in which each element is
 *  the ID of a sampled candidate.
 *  * {@code Output} true_expected_count: A batch_size * num_true matrix, representing
 *  the number of times each candidate is expected to occur in a batch
 *  of sampled candidates. If unique=true, then this is a probability.
 *  * {@code Output} sampled_expected_count: A vector of length num_sampled, for each sampled
 *  candidate representing the number of times the candidate is expected
 *  to occur in a batch of sampled candidates. If unique=true, then this is a
 *  probability. */
@Namespace("tensorflow::ops") @NoOffset public static class UniformCandidateSampler extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UniformCandidateSampler(Pointer p) { super(p); }

  /** Optional attribute setters for UniformCandidateSampler */
  public static class Attrs extends Pointer {
      static { Loader.load(); }
      /** Default native constructor. */
      public Attrs() { super((Pointer)null); allocate(); }
      /** Native array allocator. Access with {@link Pointer#position(long)}. */
      public Attrs(long size) { super((Pointer)null); allocateArray(size); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public Attrs(Pointer p) { super(p); }
      private native void allocate();
      private native void allocateArray(long size);
      @Override public Attrs position(long position) {
          return (Attrs)super.position(position);
      }
  
    /** If either seed or seed2 are set to be non-zero, the random number
     *  generator is seeded by the given seed. Otherwise, it is seeded by a
     *  random seed.
     * 
     *  Defaults to 0 */
    ///
    public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

    /** A second seed to avoid seed collision.
     * 
     *  Defaults to 0 */
    public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

    public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_);
    public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_);
  }
  public UniformCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                          @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                          @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                          @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                          @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max);
  public UniformCandidateSampler(@Const @ByRef Scope scope, @ByVal Input true_classes,
                          @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                          @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                          @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, true_classes, num_true, num_sampled, unique, range_max, attrs); }
  private native void allocate(@Const @ByRef Scope scope, @ByVal Input true_classes,
                          @Cast("tensorflow::int64") long num_true, @Cast("tensorflow::int64") long num_sampled,
                          @Cast("bool") boolean unique, @Cast("tensorflow::int64") long range_max,
                          @Const @ByRef Attrs attrs);

  public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);
  public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

  public native @ByRef Operation operation(); public native UniformCandidateSampler operation(Operation operation);
  public native @ByRef Output sampled_candidates(); public native UniformCandidateSampler sampled_candidates(Output sampled_candidates);
  public native @ByRef Output true_expected_count(); public native UniformCandidateSampler true_expected_count(Output true_expected_count);
  public native @ByRef Output sampled_expected_count(); public native UniformCandidateSampler sampled_expected_count(Output sampled_expected_count);
}
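
/* Hedged usage sketch (illustrative, not part of the generated bindings):
 * the samplers above share one calling pattern; here UniformCandidateSampler
 * draws 5 unique candidates from [0, 1000). Placeholder, DT_INT64, and
 * Scope.NewRootScope() are assumed from these bindings.
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder trueClasses = new Placeholder(scope, DT_INT64);  // batch_size x num_true
 *   UniformCandidateSampler sampler = new UniformCandidateSampler(
 *       scope, trueClasses.asInput(),
 *       1,      // num_true
 *       5,      // num_sampled
 *       true,   // unique: sample with rejection
 *       1000,   // range_max
 *       UniformCandidateSampler.Seed(42));  // optional attr for determinism
 *   Output sampled = sampler.sampled_candidates();
 *   Output trueExpected = sampler.true_expected_count();
 */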

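// Usage sketch (editor's illustration, not generated code): drawing negative
// samples with the UniformCandidateSampler wrapper defined above. The setup is
// an assumption: `trueClasses` stands for an Input wrapping a [batch_size,
// num_true] int64 tensor of true labels built elsewhere in the graph.
//
//     Scope scope = Scope.NewRootScope();
//     UniformCandidateSampler sampler = new UniformCandidateSampler(
//         scope, trueClasses,
//         /*num_true=*/1, /*num_sampled=*/64, /*unique=*/true, /*range_max=*/10000,
//         UniformCandidateSampler.Seed(42));
//     Output sampled = sampler.sampled_candidates();    // 64 sampled candidate IDs
//     Output expected = sampler.true_expected_count();  // per-true-class expected counts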
* * Raises an exception to abort the process when called. * * If exit_without_error is true, the process will exit normally, * otherwise it will exit with a SIGABRT signal. * * Returns nothing but an exception. * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * error_msg: A string which is the message associated with the exception. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class Abort extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Abort(Pointer p) { super(p); } /** Optional attribute setters for Abort */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string which is the message associated with the exception. * * Defaults to "" */ public native @ByVal Attrs ErrorMsg(@StringPiece BytePointer x); public native @ByVal Attrs ErrorMsg(@StringPiece String x); /** Defaults to false */ public native @ByVal Attrs ExitWithoutError(@Cast("bool") boolean x); public native @StringPiece BytePointer error_msg_(); public native Attrs error_msg_(BytePointer error_msg_); public native @Cast("bool") boolean exit_without_error_(); public native Attrs exit_without_error_(boolean exit_without_error_); } public Abort(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public Abort(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, attrs); } private native void allocate(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs ErrorMsg(@StringPiece BytePointer x); public static native @ByVal Attrs ErrorMsg(@StringPiece String x); public static native @ByVal Attrs ExitWithoutError(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Abort operation(Operation operation); } /** Does nothing. Serves as a control trigger for scheduling. * * Only useful as a placeholder for control edges. * * Arguments: * * scope: A Scope object * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ControlTrigger extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ControlTrigger(Pointer p) { super(p); } public ControlTrigger(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native ControlTrigger operation(Operation operation); } /** Forwards the input to the output. * * This operator represents the loop termination condition used by the * "pivot" switches of a loop. 
* * Arguments: * * scope: A Scope object * * input: A boolean scalar, representing the branch predicate of the Switch op. * * Returns: * * {@code Output}: The same tensor as {@code input}. */ @Namespace("tensorflow::ops") @NoOffset public static class LoopCond extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LoopCond(Pointer p) { super(p); } public LoopCond(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native LoopCond operation(Operation operation); public native @ByRef Output output(); public native LoopCond output(Output output); } /** Forwards the value of an available tensor from {@code inputs} to {@code output}. * * {@code Merge} waits for at least one of the tensors in {@code inputs} to become available. * It is usually combined with {@code Switch} to implement branching. * * {@code Merge} forwards the first tensor to become available to {@code output}, and sets * {@code value_index} to its index in {@code inputs}. * * Arguments: * * scope: A Scope object * * inputs: The input tensors, exactly one of which will become available. * * Returns: * * {@code Output} output: Will be set to the available input tensor. * * {@code Output} value_index: The index of the chosen input tensor in {@code inputs}. */ @Namespace("tensorflow::ops") @NoOffset public static class Merge extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Merge(Pointer p) { super(p); } public Merge(@Const @ByRef Scope scope, @ByVal InputList inputs) { super((Pointer)null); allocate(scope, inputs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs); public native @ByRef Operation operation(); public native Merge operation(Operation operation); public native @ByRef Output output(); public native Merge output(Output output); public native @ByRef Output value_index(); public native Merge value_index(Output value_index); } /** Makes its input available to the next iteration. * * Arguments: * * scope: A Scope object * * data: The tensor to be made available to the next iteration. * * Returns: * * {@code Output}: The same tensor as {@code data}. */ @Namespace("tensorflow::ops") @NoOffset public static class NextIteration extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NextIteration(Pointer p) { super(p); } public NextIteration(@Const @ByRef Scope scope, @ByVal Input data) { super((Pointer)null); allocate(scope, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native NextIteration operation(Operation operation); public native @ByRef Output output(); public native NextIteration output(Output output); } /** Makes its input available to the next iteration. * * Arguments: * * scope: A Scope object * * data: The tensor to be made available to the next iteration. 
* * Returns: * * {@code Output}: The same tensor as {@code data}. */ @Namespace("tensorflow::ops") @NoOffset public static class RefNextIteration extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RefNextIteration(Pointer p) { super(p); } public RefNextIteration(@Const @ByRef Scope scope, @ByVal Input data) { super((Pointer)null); allocate(scope, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native RefNextIteration operation(Operation operation); public native @ByRef Output output(); public native RefNextIteration output(Output output); } /** Forwards the {@code index}th element of {@code inputs} to {@code output}. * * Arguments: * * scope: A Scope object * * index: A scalar that determines the input that gets selected. * * inputs: A list of ref tensors, one of which will be forwarded to {@code output}. * * Returns: * * {@code Output}: The forwarded tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class RefSelect extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RefSelect(Pointer p) { super(p); } public RefSelect(@Const @ByRef Scope scope, @ByVal Input index, @ByVal InputList inputs) { super((Pointer)null); allocate(scope, index, inputs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input index, @ByVal InputList inputs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native RefSelect operation(Operation operation); public native @ByRef Output output(); public native RefSelect output(Output output); } /** Forwards the ref tensor {@code data} to the output port determined by {@code pred}. * * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, * the data goes to {@code output_false}. * * See also {@code Switch} and {@code Merge}. * * Arguments: * * scope: A Scope object * * data: The ref tensor to be forwarded to the appropriate output. * * pred: A scalar that specifies which output port will receive data. * * Returns: * * {@code Output} output_false: If {@code pred} is false, data will be forwarded to this output. * * {@code Output} output_true: If {@code pred} is true, data will be forwarded to this output. */ @Namespace("tensorflow::ops") @NoOffset public static class RefSwitch extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public RefSwitch(Pointer p) { super(p); } public RefSwitch(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input pred) { super((Pointer)null); allocate(scope, data, pred); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input pred); public native @ByRef Operation operation(); public native RefSwitch operation(Operation operation); public native @ByRef Output output_false(); public native RefSwitch output_false(Output output_false); public native @ByRef Output output_true(); public native RefSwitch output_true(Output output_true); } /** Forwards {@code data} to the output port determined by {@code pred}. * * If {@code pred} is true, the {@code data} input is forwarded to {@code output_true}. Otherwise, * the data goes to {@code output_false}. * * See also {@code RefSwitch} and {@code Merge}. * * Arguments: * * scope: A Scope object * * data: The tensor to be forwarded to the appropriate output. * * pred: A scalar that specifies which output port will receive data. * * Returns: * * {@code Output} output_false: If {@code pred} is false, data will be forwarded to this output. * * {@code Output} output_true: If {@code pred} is true, data will be forwarded to this output. */ @Namespace("tensorflow::ops") @NoOffset public static class Switch extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Switch(Pointer p) { super(p); } public Switch(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input pred) { super((Pointer)null); allocate(scope, data, pred); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input pred); public native @ByRef Operation operation(); public native Switch operation(Operation operation); public native @ByRef Output output_false(); public native Switch output_false(Output output_false); public native @ByRef Output output_true(); public native Switch output_true(Output output_true); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_CONTROL_FLOW_OPS_H_ // Parsed from tensorflow/cc/ops/data_flow_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_DATA_FLOW_OPS_H_ // #define TENSORFLOW_CC_OPS_DATA_FLOW_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup data_flow_ops Data Flow Ops * \{

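// Usage sketch (editor's illustration, not generated code): composing the
// control-flow wrappers above into a two-way branch. `data` and `pred` are
// assumed to be Input values built elsewhere; the OutputVector varargs
// constructor and the InputList(OutputVector) converting constructor are
// assumptions about these bindings.
//
//     Scope scope = Scope.NewRootScope();
//     Switch sw = new Switch(scope, data, pred);
//     // ... per-branch ops consume sw.output_true() / sw.output_false() ...
//     Merge merged = new Merge(scope,
//         new InputList(new OutputVector(sw.output_true(), sw.output_false())));
//     Output result = merged.output();       // first branch value to become available
//     Output which  = merged.value_index();  // index of the branch that produced it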
* * Applies a gradient to a given accumulator. * * Does not add if local_step is less than the accumulator's global_step. * * Arguments: * * scope: A Scope object * * handle: The handle to an accumulator. * * local_step: The local_step value at which the gradient was computed. * * gradient: A tensor of the gradient to be accumulated. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class AccumulatorApplyGradient extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AccumulatorApplyGradient(Pointer p) { super(p); } public AccumulatorApplyGradient(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input local_step, @ByVal Input gradient) { super((Pointer)null); allocate(scope, handle, local_step, gradient); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input local_step, @ByVal Input gradient); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native AccumulatorApplyGradient operation(Operation operation); } /** Returns the number of gradients aggregated in the given accumulator. * * Arguments: * * scope: A Scope object * * handle: The handle to an accumulator. * * Returns: * * {@code Output}: The number of gradients aggregated in the given accumulator. */ @Namespace("tensorflow::ops") @NoOffset public static class AccumulatorNumAccumulated extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AccumulatorNumAccumulated(Pointer p) { super(p); } public AccumulatorNumAccumulated(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AccumulatorNumAccumulated operation(Operation operation); public native @ByRef Output num_accumulated(); public native AccumulatorNumAccumulated num_accumulated(Output num_accumulated); } /** Updates the accumulator with a new value for global_step. * * Logs a warning if the accumulator's value is already higher than * new_global_step. * * Arguments: * * scope: A Scope object * * handle: The handle to an accumulator. * * new_global_step: The new global_step value to set. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class AccumulatorSetGlobalStep extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AccumulatorSetGlobalStep(Pointer p) { super(p); } public AccumulatorSetGlobalStep(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input new_global_step) { super((Pointer)null); allocate(scope, handle, new_global_step); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input new_global_step); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native AccumulatorSetGlobalStep operation(Operation operation); } /** Extracts the average gradient in the given ConditionalAccumulator. 
* * The op blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * * Arguments: * * scope: A Scope object * * handle: The handle to an accumulator. * * num_required: Number of gradients required before we return an aggregate. * * dtype: The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. * * Returns: * * {@code Output}: The average of the accumulated gradients. */ @Namespace("tensorflow::ops") @NoOffset public static class AccumulatorTakeGradient extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AccumulatorTakeGradient(Pointer p) { super(p); } public AccumulatorTakeGradient(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_required, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, handle, num_required, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_required, @Cast("tensorflow::DataType") int dtype); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AccumulatorTakeGradient operation(Operation operation); public native @ByRef Output average(); public native AccumulatorTakeGradient average(Output average); } /** Defines a barrier that persists across different graph executions. * * A barrier represents a key-value map, where each key is a string, and * each value is a tuple of tensors. * * At runtime, the barrier contains 'complete' and 'incomplete' * elements. A complete element has defined tensors for all components of * its value tuple, and may be accessed using BarrierTakeMany. An * incomplete element has some undefined components in its value tuple, * and may be updated using BarrierInsertMany. * * Arguments: * * scope: A Scope object * * component_types: The type of each component in a value. * * Optional attributes (see {@code Attrs}): * * shapes: The shape of each component in a value. Each shape must be 1 in the * first dimension. The length of this attr must be the same as the length of * component_types. * * capacity: The capacity of the barrier. The default capacity is MAX_INT32, * which is the largest capacity of the underlying queue. * * container: If non-empty, this barrier is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this barrier will be shared under the given name * across multiple sessions. * * Returns: * * {@code Output}: The handle to the barrier. */ @Namespace("tensorflow::ops") @NoOffset public static class Barrier extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Barrier(Pointer p) { super(p); } /** Optional attribute setters for Barrier */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The shape of each component in a value. Each shape must be 1 in the * first dimension. The length of this attr must be the same as the length of * component_types. * * Defaults to [] */ /// public native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); /** The capacity of the barrier. The default capacity is MAX_INT32, * which is the largest capacity of the underlying queue. * * Defaults to -1 */ /// public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** If non-empty, this barrier is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this barrier will be shared under the given name * across multiple sessions. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @ArraySlice PartialTensorShape shapes_(); public native Attrs shapes_(PartialTensorShape shapes_); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public Barrier(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, component_types); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types); public Barrier(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, component_types, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native Barrier operation(Operation operation); public native @ByRef Output handle(); public native Barrier handle(Output handle); } /** Closes the given barrier. * * This operation signals that no more new elements will be inserted in the * given barrier. Subsequent InsertMany operations that try to introduce a new key will fail. 
* Subsequent InsertMany operations that just add missing components to already * existing elements will continue to succeed. Subsequent TakeMany operations will * continue to succeed if sufficient completed elements remain in the barrier. * Subsequent TakeMany operations that would block will fail immediately. * * Arguments: * * scope: A Scope object * * handle: The handle to a barrier. * * Optional attributes (see {@code Attrs}): * * cancel_pending_enqueues: If true, all pending enqueue requests that are * blocked on the barrier's queue will be canceled. InsertMany will fail, even * if no new key is introduced. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class BarrierClose extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BarrierClose(Pointer p) { super(p); } /** Optional attribute setters for BarrierClose */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, all pending enqueue requests that are * blocked on the barrier's queue will be canceled. InsertMany will fail, even * if no new key is introduced. * * Defaults to false */ public native @ByVal Attrs CancelPendingEnqueues(@Cast("bool") boolean x); public native @Cast("bool") boolean cancel_pending_enqueues_(); public native Attrs cancel_pending_enqueues_(boolean cancel_pending_enqueues_); } public BarrierClose(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle); public BarrierClose(@Const @ByRef Scope scope, @ByVal Input handle, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs CancelPendingEnqueues(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native BarrierClose operation(Operation operation); } /** Computes the number of incomplete elements in the given barrier. * * Arguments: * * scope: A Scope object * * handle: The handle to a barrier. * * Returns: * * {@code Output}: The number of incomplete elements (i.e. those with some of their value * components not set) in the barrier. */ @Namespace("tensorflow::ops") @NoOffset public static class BarrierIncompleteSize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public BarrierIncompleteSize(Pointer p) { super(p); } public BarrierIncompleteSize(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native BarrierIncompleteSize operation(Operation operation); public native @ByRef Output size(); public native BarrierIncompleteSize size(Output size); } /** For each key, assigns the respective value to the specified component. * * If a key is not found in the barrier, this operation will create a new * incomplete element. If a key is found in the barrier, and the element * already has a value at component_index, this operation will fail with * INVALID_ARGUMENT, and leave the barrier in an undefined state. * * Arguments: * * scope: A Scope object * * handle: The handle to a barrier. * * keys: A one-dimensional tensor of keys, with length n. * * values: An any-dimensional tensor of values, which are associated with the * respective keys. The 0th dimension must have length n. * * component_index: The component of the barrier elements that is being assigned. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class BarrierInsertMany extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BarrierInsertMany(Pointer p) { super(p); } public BarrierInsertMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input keys, @ByVal Input values, @Cast("tensorflow::int64") long component_index) { super((Pointer)null); allocate(scope, handle, keys, values, component_index); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input keys, @ByVal Input values, @Cast("tensorflow::int64") long component_index); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native BarrierInsertMany operation(Operation operation); } /** Computes the number of complete elements in the given barrier. * * Arguments: * * scope: A Scope object * * handle: The handle to a barrier. * * Returns: * * {@code Output}: The number of complete elements (i.e. those with all of their value * components set) in the barrier. */ @Namespace("tensorflow::ops") @NoOffset public static class BarrierReadySize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BarrierReadySize(Pointer p) { super(p); } public BarrierReadySize(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native BarrierReadySize operation(Operation operation); public native @ByRef Output size(); public native BarrierReadySize size(Output size); } /** Takes the given number of completed elements from a barrier. 
* * This operation concatenates completed-element component tensors along * the 0th dimension to make a single component tensor. * * Elements come out of the barrier when they are complete, and in the order * in which they were placed into the barrier. The indices output provides * information about the batch in which each element was originally inserted * into the barrier. * * Arguments: * * scope: A Scope object * * handle: The handle to a barrier. * * num_elements: A single-element tensor containing the number of elements to * take. * * component_types: The type of each component in a value. * * Optional attributes (see {@code Attrs}): * * allow_small_batch: Allows returning fewer than num_elements items if the barrier is * already closed. * * timeout_ms: If the queue is empty, this operation will block for up to * timeout_ms milliseconds. * Note: This option is not supported yet. * * Returns: * * {@code Output} indices: A one-dimensional tensor of indices, with length num_elements. * These indices refer to the batch in which the values were placed into the * barrier (starting with MIN_LONG and increasing with each BarrierInsertMany). * * {@code Output} keys: A one-dimensional tensor of keys, with length num_elements. * * {@code OutputList} values: One any-dimensional tensor per component in a barrier element. All * values have length num_elements in the 0th dimension. */ @Namespace("tensorflow::ops") @NoOffset public static class BarrierTakeMany extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BarrierTakeMany(Pointer p) { super(p); } /** Optional attribute setters for BarrierTakeMany */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Allows returning fewer than num_elements items if the barrier is * already closed. * * Defaults to false */ public native @ByVal Attrs AllowSmallBatch(@Cast("bool") boolean x); /** Defaults to false */ /// public native @ByVal Attrs WaitForIncomplete(@Cast("bool") boolean x); /** If the queue is empty, this operation will block for up to * timeout_ms milliseconds. * Note: This option is not supported yet. 
* * Defaults to -1 */ public native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x); public native @Cast("bool") boolean allow_small_batch_(); public native Attrs allow_small_batch_(boolean allow_small_batch_); public native @Cast("bool") boolean wait_for_incomplete_(); public native Attrs wait_for_incomplete_(boolean wait_for_incomplete_); public native @Cast("tensorflow::int64") long timeout_ms_(); public native Attrs timeout_ms_(long timeout_ms_); } public BarrierTakeMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_elements, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, handle, num_elements, component_types); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_elements, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types); public BarrierTakeMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_elements, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, num_elements, component_types, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_elements, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs); public static native @ByVal Attrs AllowSmallBatch(@Cast("bool") boolean x); public static native @ByVal Attrs WaitForIncomplete(@Cast("bool") boolean x); public static native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native BarrierTakeMany operation(Operation operation); public native @ByRef Output indices(); public native BarrierTakeMany indices(Output indices); public native @ByRef Output keys(); public native BarrierTakeMany keys(Output keys); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native BarrierTakeMany values(OutputVector values); } /** A conditional accumulator for aggregating gradients. * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. * * Arguments: * * scope: A Scope object * * dtype: The type of the value being accumulated. * * shape: The shape of the values, can be [], in which case shape is unknown. * * Optional attributes (see {@code Attrs}): * * container: If non-empty, this accumulator is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this accumulator will be shared under the * given name across multiple sessions. * * Returns: * * {@code Output}: The handle to the accumulator. */ @Namespace("tensorflow::ops") @NoOffset public static class ConditionalAccumulator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConditionalAccumulator(Pointer p) { super(p); } /** Optional attribute setters for ConditionalAccumulator */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If non-empty, this accumulator is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this accumulator will be shared under the * given name across multiple sessions. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); /** Defaults to "MEAN" */ public native @ByVal Attrs ReductionType(@StringPiece BytePointer x); public native @ByVal Attrs ReductionType(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); public native @StringPiece BytePointer reduction_type_(); public native Attrs reduction_type_(BytePointer reduction_type_); } public ConditionalAccumulator(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape) { super((Pointer)null); allocate(scope, dtype, shape); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape); public ConditionalAccumulator(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtype, shape, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public static native @ByVal Attrs ReductionType(@StringPiece BytePointer x); public static native @ByVal Attrs ReductionType(@StringPiece String x); public native @ByRef Operation operation(); public native ConditionalAccumulator operation(Operation operation); public native @ByRef Output handle(); public native ConditionalAccumulator handle(Output handle); } /** Delete the tensor specified by its handle in the session. * * Arguments: * * scope: A Scope object * * handle: The handle for a tensor stored in the session state. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class DeleteSessionTensor extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public DeleteSessionTensor(Pointer p) { super(p); } public DeleteSessionTensor(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native DeleteSessionTensor operation(Operation operation); } /** Partitions {@code data} into {@code num_partitions} tensors using indices from {@code partitions}. * * For each index tuple {@code js} of size {@code partitions.ndim}, the slice {@code data[js, ...]} * becomes part of {@code outputs[partitions[js]]}. The slices with {@code partitions[js] = i} * are placed in {@code outputs[i]} in lexicographic order of {@code js}, and the first * dimension of {@code outputs[i]} is the number of entries in {@code partitions} equal to {@code i}. * In detail, *
 *  <pre>{@code python
 *      outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
 * 
 *      outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
 *  }</pre>
* * {@code data.shape} must start with {@code partitions.shape}. * * For example: *
 *  <pre>{@code python
 *      # Scalar partitions.
 *      partitions = 1
 *      num_partitions = 2
 *      data = [10, 20]
 *      outputs[0] = []  # Empty with shape [0, 2]
 *      outputs[1] = [[10, 20]]
 * 
 *      # Vector partitions.
 *      partitions = [0, 0, 1, 1, 0]
 *      num_partitions = 2
 *      data = [10, 20, 30, 40, 50]
 *      outputs[0] = [10, 20, 50]
 *      outputs[1] = [30, 40]
 *  }</pre>
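 * 
 *  A rough Java equivalent using the generated wrapper below (editor's sketch,
 *  not part of the upstream docs; {@code scope}, {@code data} and {@code partitions}
 *  are assumed to be a {@code Scope} and two {@code Input}s built elsewhere):
 * 
 *  <pre>{@code java
 *      DynamicPartition dp = new DynamicPartition(scope, data, partitions, 2);
 *      Output part0 = dp.get(0);  // rows of data where partitions == 0
 *      Output part1 = dp.get(1);  // rows of data where partitions == 1
 *  }</pre>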
* * See {@code dynamic_stitch} for an example of how to merge partitions back. *
* * Arguments: * * scope: A Scope object * * partitions: Any shape. Indices in the range {@code [0, num_partitions)}. * * num_partitions: The number of partitions to output. * * Returns: * * {@code OutputList}: The outputs tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class DynamicPartition extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DynamicPartition(Pointer p) { super(p); } public DynamicPartition(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input partitions, @Cast("tensorflow::int64") long num_partitions) { super((Pointer)null); allocate(scope, data, partitions, num_partitions); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input partitions, @Cast("tensorflow::int64") long num_partitions); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public native @ByRef Operation operation(); public native DynamicPartition operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector outputs(); public native DynamicPartition outputs(OutputVector outputs); } /** Interleaves the values from the {@code data} tensors into a single tensor. * * Builds a merged tensor such that *
 *  <pre>{@code python
 *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
 *  }</pre>
* * For example, if each {@code indices[m]} is scalar or vector, we have *
 *  <pre>{@code python
 *      # Scalar indices:
 *      merged[indices[m], ...] = data[m][...]
 * 
 *      # Vector indices:
 *      merged[indices[m][i], ...] = data[m][i, ...]
 *  }</pre>
* * Each {@code data[i].shape} must start with the corresponding {@code indices[i].shape}, * and the rest of {@code data[i].shape} must be constant w.r.t. {@code i}. That is, we * must have {@code data[i].shape = indices[i].shape + constant}. In terms of this * {@code constant}, the output shape is * * merged.shape = [max(indices)] + constant * * Values are merged in order, so if an index appears in both {@code indices[m][i]} and * {@code indices[n][j]} for {@code (m,i) < (n,j)}, the slice {@code data[n][j]} will appear in the * merged result. If you do not need this guarantee, ParallelDynamicStitch might * perform better on some devices. * * For example: *
 *  <pre>{@code python
 *      indices[0] = 6
 *      indices[1] = [4, 1]
 *      indices[2] = [[5, 2], [0, 3]]
 *      data[0] = [61, 62]
 *      data[1] = [[41, 42], [11, 12]]
 *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
 *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
 *                [51, 52], [61, 62]]
 *  }</pre>
* * This method can be used to merge partitions created by {@code dynamic_partition}, * as illustrated in the following example: *
 *  <pre>{@code python
 *      # Apply function (increments x_i) on elements for which a certain condition
 *      # apply (x_i != -1 in this example).
 *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
 *      condition_mask=tf.not_equal(x,tf.constant(-1.))
 *      partitioned_data = tf.dynamic_partition(
 *          x, tf.cast(condition_mask, tf.int32) , 2)
 *      partitioned_data[1] = partitioned_data[1] + 1.0
 *      condition_indices = tf.dynamic_partition(
 *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
 *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
 *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
 *      # unchanged.
 *  }</pre>
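 * 
 *  A rough Java equivalent of the stitch step above (editor's sketch, not part
 *  of the upstream docs). It assumes {@code condIdx0}/{@code condIdx1} and
 *  {@code part0}/{@code part1} are {@code Output}s from a prior
 *  {@code DynamicPartition}, and that these bindings expose the OutputVector
 *  varargs and {@code InputList(OutputVector)} converting constructors:
 * 
 *  <pre>{@code java
 *      InputList indices = new InputList(new OutputVector(condIdx0, condIdx1));
 *      InputList parts   = new InputList(new OutputVector(part0, part1));
 *      DynamicStitch stitched = new DynamicStitch(scope, indices, parts);
 *      Output merged = stitched.merged();
 *  }</pre>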
* * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The merged tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class DynamicStitch extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DynamicStitch(Pointer p) { super(p); } public DynamicStitch(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList data) { super((Pointer)null); allocate(scope, indices, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList data); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DynamicStitch operation(Operation operation); public native @ByRef Output merged(); public native DynamicStitch merged(Output merged); } /** A queue that produces elements in first-in first-out order. * * Arguments: * * scope: A Scope object * * component_types: The type of each component in a value. * * Optional attributes (see {@code Attrs}): * * shapes: The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. If the length of * this attr is 0, the shapes of queue elements are not constrained, and * only one element may be dequeued at a time. * * capacity: The upper bound on the number of elements in this queue. * Negative numbers mean no limit. * * container: If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this queue will be shared under the given name * across multiple sessions. * * Returns: * * {@code Output}: The handle to the queue. */ @Namespace("tensorflow::ops") @NoOffset public static class FIFOQueue extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FIFOQueue(Pointer p) { super(p); } /** Optional attribute setters for FIFOQueue */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. If the length of * this attr is 0, the shapes of queue elements are not constrained, and * only one element may be dequeued at a time. * * Defaults to [] */ /// public native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); /** The upper bound on the number of elements in this queue. * Negative numbers mean no limit. * * Defaults to -1 */ /// public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. 
* * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this queue will be shared under the given name * across multiple sessions. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @ArraySlice PartialTensorShape shapes_(); public native Attrs shapes_(PartialTensorShape shapes_); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public FIFOQueue(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, component_types); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types); public FIFOQueue(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, component_types, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native FIFOQueue operation(Operation operation); public native @ByRef Output handle(); public native FIFOQueue handle(Output handle); } /** Store the input tensor in the state of the current session. * * Arguments: * * scope: A Scope object * * value: The tensor to be stored. * * Returns: * * {@code Output}: The handle for the tensor stored in the session state, represented * as a string. */ @Namespace("tensorflow::ops") @NoOffset public static class GetSessionHandle extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GetSessionHandle(Pointer p) { super(p); } public GetSessionHandle(@Const @ByRef Scope scope, @ByVal Input value) { super((Pointer)null); allocate(scope, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native GetSessionHandle operation(Operation operation); public native @ByRef Output handle(); public native GetSessionHandle handle(Output handle); } /** Store the input tensor in the state of the current session. 
* * Arguments: * * scope: A Scope object * * value: The tensor to be stored. * * Returns: * * {@code Output}: The handle for the tensor stored in the session state, represented * as a ResourceHandle object. */ @Namespace("tensorflow::ops") @NoOffset public static class GetSessionHandleV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GetSessionHandleV2(Pointer p) { super(p); } public GetSessionHandleV2(@Const @ByRef Scope scope, @ByVal Input value) { super((Pointer)null); allocate(scope, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native GetSessionHandleV2 operation(Operation operation); public native @ByRef Output handle(); public native GetSessionHandleV2 handle(Output handle); } /** Get the value of the tensor specified by its handle. * * Arguments: * * scope: A Scope object * * handle: The handle for a tensor stored in the session state. * * dtype: The type of the output value. * * Returns: * * {@code Output}: The tensor for the given handle. */ @Namespace("tensorflow::ops") @NoOffset public static class GetSessionTensor extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GetSessionTensor(Pointer p) { super(p); } public GetSessionTensor(@Const @ByRef Scope scope, @ByVal Input handle, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, handle, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @Cast("tensorflow::DataType") int dtype); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native GetSessionTensor operation(Operation operation); public native @ByRef Output value(); public native GetSessionTensor value(Output value); } /** Op removes all elements in the underlying container. * * Arguments: * * scope: A Scope object * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class MapClear extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MapClear(Pointer p) { super(p); } /** Optional attribute setters for MapClear */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapClear(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapClear(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapClear operation(Operation operation); } /** Op returns the number of incomplete elements in the underlying container. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The size tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MapIncompleteSize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MapIncompleteSize(Pointer p) { super(p); } /** Optional attribute setters for MapIncompleteSize */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapIncompleteSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapIncompleteSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapIncompleteSize operation(Operation operation); public native @ByRef Output size(); public native MapIncompleteSize size(Output size); } /** Op peeks at the values at the specified key. If the * * underlying container does not contain this key * this op will block until it does. * * Arguments: * * scope: A Scope object * * Returns: * * {@code OutputList}: The values tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MapPeek extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MapPeek(Pointer p) { super(p); } /** Optional attribute setters for MapPeek */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
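 *
 *  A hypothetical usage sketch: peeking at the values stored under a key
 *  without removing them. {@code scope}, {@code key}, {@code indices} and
 *  {@code dtypes} are assumed to be built elsewhere.
 *  <pre>{@code
 *  static Output peekFirstValue(Scope scope, Input key, Input indices,
 *                               DataTypeVector dtypes) {
 *      MapPeek peek = new MapPeek(scope, key, indices, dtypes,
 *              MapPeek.SharedName("shared_map"));  // example name
 *      return peek.get(0);  // first tensor of the peeked value list
 *  }
 *  }</pre>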
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapPeek(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, key, indices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapPeek(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, key, indices, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapPeek operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native MapPeek values(OutputVector values); } /** Op returns the number of elements in the underlying container. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The size tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MapSize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MapSize(Pointer p) { super(p); } /** Optional attribute setters for MapSize */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
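 *
 *  A hypothetical usage sketch: querying how many complete elements are
 *  currently staged. {@code scope} and {@code dtypes} are assumed to exist.
 *  <pre>{@code
 *  static Output stagedCount(Scope scope, DataTypeVector dtypes) {
 *      MapSize op = new MapSize(scope, dtypes,
 *              MapSize.SharedName("shared_map"));  // example name
 *      return op.size();  // scalar size tensor
 *  }
 *  }</pre>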
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapSize operation(Operation operation); public native @ByRef Output size(); public native MapSize size(Output size); } /** Stage (key, values) in the underlying container which behaves like a hashtable. * * Arguments: * * scope: A Scope object * * key: int64 * * values: a list of tensors * dtypes A list of data types that inserted values should adhere to. * * Optional attributes (see {@code Attrs}): * * capacity: Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * * container: If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. * * shared_name: It is necessary to match this name to the matching Unstage Op. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class MapStage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
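 *
 *  A hypothetical usage sketch: staging a {@code (key, values)} pair in the
 *  shared hash table. {@code scope}, {@code key}, {@code indices},
 *  {@code values} and {@code dtypes} are assumed to be built elsewhere, and
 *  the attribute values shown are examples only.
 *  <pre>{@code
 *  static Operation stagePair(Scope scope, Input key, Input indices,
 *                             InputList values, DataTypeVector dtypes) {
 *      MapStage stage = new MapStage(scope, key, indices, values, dtypes,
 *              MapStage.Capacity(8).SharedName("shared_map"));
 *      return stage.asOperation();
 *  }
 *  }</pre>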
*/ public MapStage(Pointer p) { super(p); } /** Optional attribute setters for MapStage */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * * Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ /// public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** It is necessary to match this name to the matching Unstage Op. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapStage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, key, indices, values, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapStage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, key, indices, values, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapStage operation(Operation operation); } /** Op removes and returns the values 
associated with the key * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. * * Arguments: * * scope: A Scope object * * Returns: * * {@code OutputList}: The values tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MapUnstage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MapUnstage(Pointer p) { super(p); } /** Optional attribute setters for MapUnstage */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapUnstage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, key, indices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapUnstage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, key, indices, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapUnstage operation(Operation operation); public native @ByRef 
@Cast("tensorflow::OutputList*") OutputVector values(); public native MapUnstage values(OutputVector values); } /** Op removes and returns a random (key, value) * * from the underlying container. If the underlying container * does not contain elements, the op will block until it does. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output} key * * {@code OutputList} values */ @Namespace("tensorflow::ops") @NoOffset public static class MapUnstageNoKey extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MapUnstageNoKey(Pointer p) { super(p); } /** Optional attribute setters for MapUnstageNoKey */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public MapUnstageNoKey(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, indices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public MapUnstageNoKey(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, indices, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native MapUnstageNoKey operation(Operation operation); public native 
@ByRef Output key(); public native MapUnstageNoKey key(Output key); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native MapUnstageNoKey values(OutputVector values); } /** Op removes all elements in the underlying container. * * Arguments: * * scope: A Scope object * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapClear extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapClear(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapClear */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapClear(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapClear(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapClear operation(Operation operation); } /** Op returns the number of incomplete elements in the underlying 
container. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The size tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapIncompleteSize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapIncompleteSize(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapIncompleteSize */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapIncompleteSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapIncompleteSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapIncompleteSize operation(Operation operation); public native @ByRef Output size(); public native OrderedMapIncompleteSize size(Output size); } /** Op peeks at the values at the specified key. 
If the * * underlying container does not contain this key * this op will block until it does. This Op is optimized for * performance. * * Arguments: * * scope: A Scope object * * Returns: * * {@code OutputList}: The values tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapPeek extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapPeek(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapPeek */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapPeek(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, key, indices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapPeek(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, key, indices, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapPeek operation(Operation operation); public native 
@ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native OrderedMapPeek values(OutputVector values); } /** Op returns the number of elements in the underlying container. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The size tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapSize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapSize(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapSize */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapSize operation(Operation operation); public native @ByRef Output size(); public native 
OrderedMapSize size(Output size); } /** Stage (key, values) in the underlying container which behaves like an ordered * * associative container. Elements are ordered by key. * * Arguments: * * scope: A Scope object * * key: int64 * * values: a list of tensors * dtypes A list of data types that inserted values should adhere to. * * Optional attributes (see {@code Attrs}): * * capacity: Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * * container: If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. * * shared_name: It is necessary to match this name to the matching Unstage Op. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapStage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapStage(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapStage */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * * Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ /// public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** It is necessary to match this name to the matching Unstage Op. 
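 *  For instance (a hypothetical sketch: {@code scope}, {@code key},
 *  {@code indices}, {@code values} and {@code dtypes} are assumed to be built
 *  elsewhere, and {@code "ordered_map"} is an example shared name):
 *  <pre>{@code
 *  static Operation stageOrdered(Scope scope, Input key, Input indices,
 *                                InputList values, DataTypeVector dtypes) {
 *      OrderedMapStage stage = new OrderedMapStage(scope, key, indices, values,
 *              dtypes, OrderedMapStage.SharedName("ordered_map"));
 *      return stage.asOperation();
 *  }
 *  }</pre>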
* * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapStage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, key, indices, values, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapStage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, key, indices, values, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @ByVal InputList values, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapStage operation(Operation operation); } /** Op removes and returns the values associated with the key * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. * * Arguments: * * scope: A Scope object * * Returns: * * {@code OutputList}: The values tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapUnstage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapUnstage(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapUnstage */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
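 *
 *  A hypothetical usage sketch: removing the values staged under a key from
 *  the ordered container. {@code scope}, {@code key}, {@code indices} and
 *  {@code dtypes} are assumed to be built elsewhere.
 *  <pre>{@code
 *  static OutputVector unstageOrdered(Scope scope, Input key, Input indices,
 *                                     DataTypeVector dtypes) {
 *      OrderedMapUnstage unstage = new OrderedMapUnstage(scope, key, indices,
 *              dtypes, OrderedMapUnstage.SharedName("ordered_map"));  // example
 *      return unstage.values();  // one Output per requested dtype
 *  }
 *  }</pre>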
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapUnstage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, key, indices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapUnstage(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, key, indices, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input key, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapUnstage operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native OrderedMapUnstage values(OutputVector values); } /** Op removes and returns the (key, value) element with the smallest * * key from the underlying container. If the underlying container * does not contain elements, the op will block until it does. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output} key * * {@code OutputList} values */ @Namespace("tensorflow::ops") @NoOffset public static class OrderedMapUnstageNoKey extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OrderedMapUnstageNoKey(Pointer p) { super(p); } /** Optional attribute setters for OrderedMapUnstageNoKey */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. 
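 *
 *  A hypothetical usage sketch: popping the element with the smallest key.
 *  {@code scope}, {@code indices} and {@code dtypes} are assumed to be built
 *  elsewhere.
 *  <pre>{@code
 *  static Output popSmallestKey(Scope scope, Input indices,
 *                               DataTypeVector dtypes) {
 *      OrderedMapUnstageNoKey pop = new OrderedMapUnstageNoKey(scope, indices,
 *              dtypes, OrderedMapUnstageNoKey.SharedName("ordered_map"));
 *      return pop.key();  // the removed key; pop.values() holds the tensors
 *  }
 *  }</pre>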
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public OrderedMapUnstageNoKey(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, indices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public OrderedMapUnstageNoKey(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, indices, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native OrderedMapUnstageNoKey operation(Operation operation); public native @ByRef Output key(); public native OrderedMapUnstageNoKey key(Output key); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native OrderedMapUnstageNoKey values(OutputVector values); } /** A queue that produces elements in first-in first-out order. * * Variable-size shapes are allowed by setting the corresponding shape dimensions * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum * size of any given element in the minibatch. See below for details. * * Arguments: * * scope: A Scope object * * component_types: The type of each component in a value. * * Optional attributes (see {@code Attrs}): * * shapes: The shape of each component in a value. 
The length of this attr must * be either 0 or the same as the length of component_types. * Shapes of fixed rank but variable size are allowed by setting * any shape dimension to -1. In this case, the inputs' shape may vary along * the given dimension, and DequeueMany will pad the given dimension with * zeros up to the maximum shape of all elements in the given batch. * If the length of this attr is 0, different queue elements may have * different ranks and shapes, but only one element may be dequeued at a time. * * capacity: The upper bound on the number of elements in this queue. * Negative numbers mean no limit. * * container: If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this queue will be shared under the given name * across multiple sessions. * * Returns: * * {@code Output}: The handle to the queue. */ @Namespace("tensorflow::ops") @NoOffset public static class PaddingFIFOQueue extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PaddingFIFOQueue(Pointer p) { super(p); } /** Optional attribute setters for PaddingFIFOQueue */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. * Shapes of fixed rank but variable size are allowed by setting * any shape dimension to -1. In this case, the inputs' shape may vary along * the given dimension, and DequeueMany will pad the given dimension with * zeros up to the maximum shape of all elements in the given batch. * If the length of this attr is 0, different queue elements may have * different ranks and shapes, but only one element may be dequeued at a time. * * Defaults to [] */ /// public native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); /** The upper bound on the number of elements in this queue. * Negative numbers mean no limit. * * Defaults to -1 */ /// public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this queue will be shared under the given name * across multiple sessions. 
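 *  For instance (a hypothetical sketch: {@code scope} and
 *  {@code componentTypes} are assumed to be built elsewhere, and the attribute
 *  values shown are examples only):
 *  <pre>{@code
 *  static Output makePaddedQueue(Scope scope, DataTypeVector componentTypes) {
 *      PaddingFIFOQueue queue = new PaddingFIFOQueue(scope, componentTypes,
 *              PaddingFIFOQueue.Capacity(32)             // example capacity
 *                              .SharedName("padded_queue"));
 *      return queue.handle();  // pass this handle to enqueue/dequeue ops
 *  }
 *  }</pre>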
* * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @ArraySlice PartialTensorShape shapes_(); public native Attrs shapes_(PartialTensorShape shapes_); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public PaddingFIFOQueue(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, component_types); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types); public PaddingFIFOQueue(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, component_types, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native PaddingFIFOQueue operation(Operation operation); public native @ByRef Output handle(); public native PaddingFIFOQueue handle(Output handle); } /** Interleave the values from the {@code data} tensors into a single tensor. * * Builds a merged tensor such that * *
 *  <pre>{@code python
 *      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
 *  }</pre>
* * For example, if each {@code indices[m]} is scalar or vector, we have * *
 *  <pre>{@code python
 *      # Scalar indices:
 *      merged[indices[m], ...] = data[m][...]
 * 
 *      # Vector indices:
 *      merged[indices[m][i], ...] = data[m][i, ...]
 *  }</pre>
* * Each {@code data[i].shape} must start with the corresponding {@code indices[i].shape}, * and the rest of {@code data[i].shape} must be constant w.r.t. {@code i}. That is, we * must have {@code data[i].shape = indices[i].shape + constant}. In terms of this * {@code constant}, the output shape is * * merged.shape = [max(indices)] + constant * * Values may be merged in parallel, so if an index appears in both {@code indices[m][i]} * and {@code indices[n][j]}, the result may be invalid. This differs from the normal * DynamicStitch operator that defines the behavior in that case. * * For example: * *
 *  <pre>{@code python
 *      indices[0] = 6
 *      indices[1] = [4, 1]
 *      indices[2] = [[5, 2], [0, 3]]
 *      data[0] = [61, 62]
 *      data[1] = [[41, 42], [11, 12]]
 *      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
 *      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
 *                [51, 52], [61, 62]]
 *  }</pre>
* * This method can be used to merge partitions created by {@code dynamic_partition}, * as illustrated in the following example: * *
 *  <pre>{@code python
 *      # Apply a function (increment x_i) to the elements for which a certain
 *      # condition applies (x_i != -1 in this example).
 *      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
 *      condition_mask=tf.not_equal(x,tf.constant(-1.))
 *      partitioned_data = tf.dynamic_partition(
 *          x, tf.cast(condition_mask, tf.int32), 2)
 *      partitioned_data[1] = partitioned_data[1] + 1.0
 *      condition_indices = tf.dynamic_partition(
 *          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
 *      x = tf.dynamic_stitch(condition_indices, partitioned_data)
 *      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
 *      # unchanged.
 *  }</pre>
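 *
 *  A comparable graph construction with this Java binding (a hypothetical
 *  sketch: {@code scope}, {@code indices} and {@code data} are assumed to be
 *  built elsewhere as {@code InputList}s):
 *  <pre>{@code
 *  static Output stitch(Scope scope, InputList indices, InputList data) {
 *      ParallelDynamicStitch op =
 *              new ParallelDynamicStitch(scope, indices, data);
 *      return op.merged();  // the interleaved result tensor
 *  }
 *  }</pre>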
* * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The merged tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ParallelDynamicStitch extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParallelDynamicStitch(Pointer p) { super(p); } public ParallelDynamicStitch(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList data) { super((Pointer)null); allocate(scope, indices, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList data); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ParallelDynamicStitch operation(Operation operation); public native @ByRef Output merged(); public native ParallelDynamicStitch merged(Output merged); } /** A queue that produces elements sorted by the first component value. * * Note that the PriorityQueue requires the first component of any element * to be a scalar int64, in addition to the other elements declared by * component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue * and DequeueMany) on a PriorityQueue will all require (resp. output) one extra * entry in their input (resp. output) lists. * * Arguments: * * scope: A Scope object * * shapes: The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. If the length of * this attr is 0, the shapes of queue elements are not constrained, and * only one element may be dequeued at a time. * * Optional attributes (see {@code Attrs}): * * component_types: The type of each component in a value. * * capacity: The upper bound on the number of elements in this queue. * Negative numbers mean no limit. * * container: If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this queue will be shared under the given name * across multiple sessions. * * Returns: * * {@code Output}: The handle to the queue. */ @Namespace("tensorflow::ops") @NoOffset public static class PriorityQueue extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PriorityQueue(Pointer p) { super(p); } /** Optional attribute setters for PriorityQueue */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The type of each component in a value. * * Defaults to [] */ /// public native @ByVal Attrs ComponentTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** The upper bound on the number of elements in this queue. * Negative numbers mean no limit. * * Defaults to -1 */ /// public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** If non-empty, this queue is placed in the given container. 
/** A queue that produces elements sorted by the first component value.
 *
 *  Note that the PriorityQueue requires the first component of any element
 *  to be a scalar int64, in addition to the other elements declared by
 *  component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
 *  and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
 *  entry in their input (resp. output) lists.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * shapes: The shape of each component in a value. The length of this attr must
 *  be either 0 or the same as the length of component_types. If the length of
 *  this attr is 0, the shapes of queue elements are not constrained, and
 *  only one element may be dequeued at a time.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * component_types: The type of each component in a value.
 *  * capacity: The upper bound on the number of elements in this queue.
 *  Negative numbers mean no limit.
 *  * container: If non-empty, this queue is placed in the given container.
 *  Otherwise, a default container is used.
 *  * shared_name: If non-empty, this queue will be shared under the given name
 *  across multiple sessions.
 *
 *  Returns:
 *  * {@code Output}: The handle to the queue. */
@Namespace("tensorflow::ops") @NoOffset public static class PriorityQueue extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public PriorityQueue(Pointer p) { super(p); }

    /** Optional attribute setters for PriorityQueue */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** The type of each component in a value.
         *
         *  Defaults to [] */
        ///
        public native @ByVal Attrs ComponentTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x);

        /** The upper bound on the number of elements in this queue.
         *  Negative numbers mean no limit.
         *
         *  Defaults to -1 */
        ///
        public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);

        /** If non-empty, this queue is placed in the given container.
         *  Otherwise, a default container is used.
         *
         *  Defaults to "" */
        ///
        public native @ByVal Attrs Container(@StringPiece BytePointer x);
        public native @ByVal Attrs Container(@StringPiece String x);

        /** If non-empty, this queue will be shared under the given name
         *  across multiple sessions.
         *
         *  Defaults to "" */
        public native @ByVal Attrs SharedName(@StringPiece BytePointer x);
        public native @ByVal Attrs SharedName(@StringPiece String x);

        public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector component_types_(); public native Attrs component_types_(DataTypeVector component_types_);
        public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_);
        public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_);
        public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_);
    }

    public PriorityQueue(@Const @ByRef Scope scope, @ArraySlice PartialTensorShape shapes) { super((Pointer)null); allocate(scope, shapes); }
    private native void allocate(@Const @ByRef Scope scope, @ArraySlice PartialTensorShape shapes);
    public PriorityQueue(@Const @ByRef Scope scope, @ArraySlice PartialTensorShape shapes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shapes, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ArraySlice PartialTensorShape shapes, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs ComponentTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x);
    public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs Container(@StringPiece BytePointer x);
    public static native @ByVal Attrs Container(@StringPiece String x);
    public static native @ByVal Attrs SharedName(@StringPiece BytePointer x);
    public static native @ByVal Attrs SharedName(@StringPiece String x);

    public native @ByRef Operation operation(); public native PriorityQueue operation(Operation operation);
    public native @ByRef Output handle(); public native PriorityQueue handle(Output handle);
}
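// Illustrative usage sketch, hand-written rather than generated: builds a PriorityQueue
// node with chained optional attributes. The Scope, the shapes array, and the
// component-type list are assumed to be constructed by the caller elsewhere.
public static Output priorityQueueSketch(Scope scope, PartialTensorShape shapes, DataTypeVector componentTypes) {
    // Each element's first component must be a scalar int64 priority (see the docs above).
    PriorityQueue queue = new PriorityQueue(scope, shapes,
            PriorityQueue.ComponentTypes(componentTypes)
                         .Capacity(128)                  // bound the queue; -1 would mean no limit
                         .SharedName("priority_queue")); // hypothetical name shared across sessions
    return queue.handle(); // feed this handle to the enqueue/dequeue ops below
}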
/** Closes the given queue.
 *
 *  This operation signals that no more elements will be enqueued in the
 *  given queue. Subsequent Enqueue(Many) operations will fail.
 *  Subsequent Dequeue(Many) operations will continue to succeed if
 *  sufficient elements remain in the queue. Subsequent Dequeue(Many)
 *  operations that would block will fail immediately.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * cancel_pending_enqueues: If true, all pending enqueue requests that are
 *  blocked on the given queue will be canceled.
 *
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class QueueClose extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueClose(Pointer p) { super(p); }

    /** Optional attribute setters for QueueClose */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If true, all pending enqueue requests that are
         *  blocked on the given queue will be canceled.
         *
         *  Defaults to false */
        public native @ByVal Attrs CancelPendingEnqueues(@Cast("bool") boolean x);

        public native @Cast("bool") boolean cancel_pending_enqueues_(); public native Attrs cancel_pending_enqueues_(boolean cancel_pending_enqueues_);
    }

    public QueueClose(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle);
    public QueueClose(@Const @ByRef Scope scope, @ByVal Input handle, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation();

    public static native @ByVal Attrs CancelPendingEnqueues(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native QueueClose operation(Operation operation);
}
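// Illustrative sketch (hand-written): closes a queue so producers stop feeding it.
// The Input wrapping the queue handle is assumed to come from a queue op such as
// the PriorityQueue above.
public static Operation queueCloseSketch(Scope scope, Input queueHandle) {
    QueueClose close = new QueueClose(scope, queueHandle,
            QueueClose.CancelPendingEnqueues(true)); // default false: blocked enqueues would stay pending
    return close.asOperation();
}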
/** Dequeues {@code n} tuples of one or more tensors from the given queue.
 *
 *  If the queue is closed and there are fewer than {@code n} elements, then an
 *  OutOfRange error is returned.
 *
 *  This operation concatenates queue-element component tensors along the
 *  0th dimension to make a single component tensor. All of the components
 *  in the dequeued tuple will have size {@code n} in the 0th dimension.
 *
 *  This operation has {@code k} outputs, where {@code k} is the number of components in
 *  the tuples stored in the given queue, and output {@code i} is the ith
 *  component of the dequeued tuple.
 *
 *  N.B. If the queue is empty, this operation will block until {@code n} elements
 *  have been dequeued (or 'timeout_ms' elapses, if specified).
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *  * n: The number of tuples to dequeue.
 *  * component_types: The type of each component in a tuple.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * timeout_ms: If the queue has fewer than n elements, this operation
 *  will block for up to timeout_ms milliseconds.
 *  Note: This option is not supported yet.
 *
 *  Returns:
 *  * {@code OutputList}: One or more tensors that were dequeued as a tuple. */
@Namespace("tensorflow::ops") @NoOffset public static class QueueDequeueMany extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueDequeueMany(Pointer p) { super(p); }

    /** Optional attribute setters for QueueDequeueMany */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If the queue has fewer than n elements, this operation
         *  will block for up to timeout_ms milliseconds.
         *  Note: This option is not supported yet.
         *
         *  Defaults to -1 */
        public native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

        public native @Cast("tensorflow::int64") long timeout_ms_(); public native Attrs timeout_ms_(long timeout_ms_);
    }

    public QueueDequeueMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, handle, n, component_types); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types);
    public QueueDequeueMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, n, component_types, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

    public static native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

    public native @ByRef Operation operation(); public native QueueDequeueMany operation(Operation operation);
    public native @ByRef @Cast("tensorflow::OutputList*") OutputVector components(); public native QueueDequeueMany components(OutputVector components);
}
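// Illustrative sketch (hand-written): dequeues a batch of n tuples; each output is the
// corresponding component stacked along dimension 0. The scalar Input n and the
// component-type list are assumed to be built by the caller.
public static Output queueDequeueManySketch(Scope scope, Input queueHandle, Input n, DataTypeVector componentTypes) {
    QueueDequeueMany batch = new QueueDequeueMany(scope, queueHandle, n, componentTypes);
    return batch.get(0); // first component tensor; its dimension 0 has size n
}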
/** Dequeues {@code n} tuples of one or more tensors from the given queue.
 *
 *  This operation is not supported by all queues. If a queue does not support
 *  DequeueUpTo, then an Unimplemented error is returned.
 *
 *  If the queue is closed and there are more than 0 but fewer than {@code n}
 *  elements remaining, then instead of returning an OutOfRange error like
 *  QueueDequeueMany, fewer than {@code n} elements are returned immediately. If
 *  the queue is closed and there are 0 elements left in the queue, then
 *  an OutOfRange error is returned just like in QueueDequeueMany.
 *  Otherwise the behavior is identical to QueueDequeueMany:
 *
 *  This operation concatenates queue-element component tensors along the
 *  0th dimension to make a single component tensor. All of the components
 *  in the dequeued tuple will have size n in the 0th dimension.
 *
 *  This operation has {@code k} outputs, where {@code k} is the number of components in
 *  the tuples stored in the given queue, and output {@code i} is the ith
 *  component of the dequeued tuple.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *  * n: The number of tuples to dequeue.
 *  * component_types: The type of each component in a tuple.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * timeout_ms: If the queue has fewer than n elements, this operation
 *  will block for up to timeout_ms milliseconds.
 *  Note: This option is not supported yet.
 *
 *  Returns:
 *  * {@code OutputList}: One or more tensors that were dequeued as a tuple. */
@Namespace("tensorflow::ops") @NoOffset public static class QueueDequeueUpTo extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueDequeueUpTo(Pointer p) { super(p); }

    /** Optional attribute setters for QueueDequeueUpTo */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If the queue has fewer than n elements, this operation
         *  will block for up to timeout_ms milliseconds.
         *  Note: This option is not supported yet.
         *
         *  Defaults to -1 */
        public native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

        public native @Cast("tensorflow::int64") long timeout_ms_(); public native Attrs timeout_ms_(long timeout_ms_);
    }

    public QueueDequeueUpTo(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, handle, n, component_types); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types);
    public QueueDequeueUpTo(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, n, component_types, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input n, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

    public static native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

    public native @ByRef Operation operation(); public native QueueDequeueUpTo operation(Operation operation);
    public native @ByRef @Cast("tensorflow::OutputList*") OutputVector components(); public native QueueDequeueUpTo components(OutputVector components);
}
/** Dequeues a tuple of one or more tensors from the given queue.
 *
 *  This operation has k outputs, where k is the number of components
 *  in the tuples stored in the given queue, and output i is the ith
 *  component of the dequeued tuple.
 *
 *  N.B. If the queue is empty, this operation will block until an element
 *  has been dequeued (or 'timeout_ms' elapses, if specified).
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *  * component_types: The type of each component in a tuple.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * timeout_ms: If the queue is empty, this operation will block for up to
 *  timeout_ms milliseconds.
 *  Note: This option is not supported yet.
 *
 *  Returns:
 *  * {@code OutputList}: One or more tensors that were dequeued as a tuple. */
@Namespace("tensorflow::ops") @NoOffset public static class QueueDequeue extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueDequeue(Pointer p) { super(p); }

    /** Optional attribute setters for QueueDequeue */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If the queue is empty, this operation will block for up to
         *  timeout_ms milliseconds.
         *  Note: This option is not supported yet.
         *
         *  Defaults to -1 */
        public native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

        public native @Cast("tensorflow::int64") long timeout_ms_(); public native Attrs timeout_ms_(long timeout_ms_);
    }

    public QueueDequeue(@Const @ByRef Scope scope, @ByVal Input handle, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, handle, component_types); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types);
    public QueueDequeue(@Const @ByRef Scope scope, @ByVal Input handle, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, component_types, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

    public static native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

    public native @ByRef Operation operation(); public native QueueDequeue operation(Operation operation);
    public native @ByRef @Cast("tensorflow::OutputList*") OutputVector components(); public native QueueDequeue components(OutputVector components);
}
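// Illustrative sketch (hand-written): dequeues a single tuple. TimeoutMs is shown for
// completeness, though the generated docs above note the option is not supported yet.
public static Output queueDequeueSketch(Scope scope, Input queueHandle, DataTypeVector componentTypes) {
    QueueDequeue one = new QueueDequeue(scope, queueHandle, componentTypes,
            QueueDequeue.TimeoutMs(5000)); // would cap the blocking wait at 5 seconds
    return one.get(0);
}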
/** Enqueues zero or more tuples of one or more tensors in the given queue.
 *
 *  This operation slices each component tensor along the 0th dimension to
 *  make multiple queue elements. All of the tuple components must have the
 *  same size in the 0th dimension.
 *
 *  The components input has k elements, which correspond to the components of
 *  tuples stored in the given queue.
 *
 *  N.B. If the queue is full, this operation will block until the given
 *  elements have been enqueued (or 'timeout_ms' elapses, if specified).
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *  * components: One or more tensors from which the enqueued tensors should
 *  be taken.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * timeout_ms: If the queue is too full, this operation will block for up
 *  to timeout_ms milliseconds.
 *  Note: This option is not supported yet.
 *
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class QueueEnqueueMany extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueEnqueueMany(Pointer p) { super(p); }

    /** Optional attribute setters for QueueEnqueueMany */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If the queue is too full, this operation will block for up
         *  to timeout_ms milliseconds.
         *  Note: This option is not supported yet.
         *
         *  Defaults to -1 */
        public native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

        public native @Cast("tensorflow::int64") long timeout_ms_(); public native Attrs timeout_ms_(long timeout_ms_);
    }

    public QueueEnqueueMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components) { super((Pointer)null); allocate(scope, handle, components); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components);
    public QueueEnqueueMany(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, components, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation();

    public static native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

    public native @ByRef Operation operation(); public native QueueEnqueueMany operation(Operation operation);
}
/** Enqueues a tuple of one or more tensors in the given queue.
 *
 *  The components input has k elements, which correspond to the components of
 *  tuples stored in the given queue.
 *
 *  N.B. If the queue is full, this operation will block until the given
 *  element has been enqueued (or 'timeout_ms' elapses, if specified).
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *  * components: One or more tensors from which the enqueued tensors should be taken.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * timeout_ms: If the queue is full, this operation will block for up to
 *  timeout_ms milliseconds.
 *  Note: This option is not supported yet.
 *
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class QueueEnqueue extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueEnqueue(Pointer p) { super(p); }

    /** Optional attribute setters for QueueEnqueue */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If the queue is full, this operation will block for up to
         *  timeout_ms milliseconds.
         *  Note: This option is not supported yet.
         *
         *  Defaults to -1 */
        public native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

        public native @Cast("tensorflow::int64") long timeout_ms_(); public native Attrs timeout_ms_(long timeout_ms_);
    }

    public QueueEnqueue(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components) { super((Pointer)null); allocate(scope, handle, components); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components);
    public QueueEnqueue(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, components, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal InputList components, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation();

    public static native @ByVal Attrs TimeoutMs(@Cast("tensorflow::int64") long x);

    public native @ByRef Operation operation(); public native QueueEnqueue operation(Operation operation);
}
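// Illustrative sketch (hand-written): enqueues one tuple. The InputList holding the
// tuple's component tensors is assumed to be assembled by the caller; QueueEnqueueMany
// above takes the same arguments but slices each component along dimension 0.
public static Operation queueEnqueueSketch(Scope scope, Input queueHandle, InputList components) {
    QueueEnqueue enqueue = new QueueEnqueue(scope, queueHandle, components);
    return enqueue.asOperation();
}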
/** Returns true if queue is closed.
 *
 *  This operation returns true if the queue is closed and false if the queue
 *  is open.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *
 *  Returns:
 *  * {@code Output}: The is_closed tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class QueueIsClosed extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueIsClosed(Pointer p) { super(p); }

    public QueueIsClosed(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native QueueIsClosed operation(Operation operation);
    public native @ByRef Output is_closed(); public native QueueIsClosed is_closed(Output is_closed);
}

/** Returns true if queue is closed.
 *
 *  This operation returns true if the queue is closed and false if the queue
 *  is open.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *
 *  Returns:
 *  * {@code Output}: The is_closed tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class QueueIsClosedV2 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueIsClosedV2(Pointer p) { super(p); }

    public QueueIsClosedV2(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native QueueIsClosedV2 operation(Operation operation);
    public native @ByRef Output is_closed(); public native QueueIsClosedV2 is_closed(Output is_closed);
}

/** Computes the number of elements in the given queue.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a queue.
 *
 *  Returns:
 *  * {@code Output}: The number of elements in the given queue. */
@Namespace("tensorflow::ops") @NoOffset public static class QueueSize extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public QueueSize(Pointer p) { super(p); }

    public QueueSize(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native QueueSize operation(Operation operation);
    public native @ByRef Output size(); public native QueueSize size(Output size);
}
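// Illustrative sketch (hand-written): inspects a queue without mutating it, wiring a
// closed-flag tensor and a size tensor off the same handle.
public static Output queueStatusSketch(Scope scope, Input queueHandle) {
    Output closedFlag = new QueueIsClosed(scope, queueHandle).is_closed(); // scalar bool output
    Output size = new QueueSize(scope, queueHandle).size();                // scalar int32 output
    return size; // closedFlag would typically feed a conditional elsewhere in the graph
}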
/** A queue that randomizes the order of elements.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * component_types: The type of each component in a value.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * shapes: The shape of each component in a value. The length of this attr must
 *  be either 0 or the same as the length of component_types. If the length of
 *  this attr is 0, the shapes of queue elements are not constrained, and
 *  only one element may be dequeued at a time.
 *  * capacity: The upper bound on the number of elements in this queue.
 *  Negative numbers mean no limit.
 *  * min_after_dequeue: Dequeue will block unless there would be this
 *  many elements after the dequeue or the queue is closed. This
 *  ensures a minimum level of mixing of elements.
 *  * seed: If either seed or seed2 is set to be non-zero, the random number
 *  generator is seeded by the given seed. Otherwise, a random seed is used.
 *  * seed2: A second seed to avoid seed collision.
 *  * container: If non-empty, this queue is placed in the given container.
 *  Otherwise, a default container is used.
 *  * shared_name: If non-empty, this queue will be shared under the given name
 *  across multiple sessions.
 *
 *  Returns:
 *  * {@code Output}: The handle to the queue. */
@Namespace("tensorflow::ops") @NoOffset public static class RandomShuffleQueue extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RandomShuffleQueue(Pointer p) { super(p); }

    /** Optional attribute setters for RandomShuffleQueue */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** The shape of each component in a value. The length of this attr must
         *  be either 0 or the same as the length of component_types. If the length of
         *  this attr is 0, the shapes of queue elements are not constrained, and
         *  only one element may be dequeued at a time.
         *
         *  Defaults to [] */
        ///
        public native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x);

        /** The upper bound on the number of elements in this queue.
         *  Negative numbers mean no limit.
         *
         *  Defaults to -1 */
        ///
        public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);

        /** Dequeue will block unless there would be this
         *  many elements after the dequeue or the queue is closed. This
         *  ensures a minimum level of mixing of elements.
         *
         *  Defaults to 0 */
        ///
        public native @ByVal Attrs MinAfterDequeue(@Cast("tensorflow::int64") long x);

        /** If either seed or seed2 is set to be non-zero, the random number
         *  generator is seeded by the given seed. Otherwise, a random seed is used.
         *
         *  Defaults to 0 */
        ///
        public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x);

        /** A second seed to avoid seed collision.
         *
         *  Defaults to 0 */
        ///
        public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x);

        /** If non-empty, this queue is placed in the given container.
         *  Otherwise, a default container is used.
         *
         *  Defaults to "" */
        ///
        public native @ByVal Attrs Container(@StringPiece BytePointer x);
        public native @ByVal Attrs Container(@StringPiece String x);

        /** If non-empty, this queue will be shared under the given name
         *  across multiple sessions.
* * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @ArraySlice PartialTensorShape shapes_(); public native Attrs shapes_(PartialTensorShape shapes_); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long min_after_dequeue_(); public native Attrs min_after_dequeue_(long min_after_dequeue_); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public RandomShuffleQueue(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types) { super((Pointer)null); allocate(scope, component_types); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types); public RandomShuffleQueue(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, component_types, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector component_types, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Shapes(@ArraySlice PartialTensorShape x); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MinAfterDequeue(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native RandomShuffleQueue operation(Operation operation); public native @ByRef Output handle(); public native RandomShuffleQueue handle(Output handle); } /** Emits randomized records. * * Arguments: * * scope: A Scope object * * file_pattern: Glob pattern for the data files. * * Optional attributes (see {@code Attrs}): * * file_random_seed: Random seeds used to produce randomized records. * * file_shuffle_shift_ratio: Shifts the list of files after the list is randomly * shuffled. * * file_buffer_size: The randomization shuffling buffer. * * file_parallelism: How many sstables are opened and concurrently iterated over. * * batch_size: The batch size. * * compression_type: The type of compression for the file. Currently ZLIB and * GZIP are supported. Defaults to none. * * Returns: * * {@code Output}: A tensor of shape [batch_size]. */ @Namespace("tensorflow::ops") @NoOffset public static class RecordInput extends Pointer { static { Loader.load(); } /** Pointer cast constructor. 
/** Emits randomized records.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * file_pattern: Glob pattern for the data files.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * file_random_seed: Random seeds used to produce randomized records.
 *  * file_shuffle_shift_ratio: Shifts the list of files after the list is randomly
 *  shuffled.
 *  * file_buffer_size: The randomization shuffling buffer.
 *  * file_parallelism: How many sstables are opened and concurrently iterated over.
 *  * batch_size: The batch size.
 *  * compression_type: The type of compression for the file. Currently ZLIB and
 *  GZIP are supported. Defaults to none.
 *
 *  Returns:
 *  * {@code Output}: A tensor of shape [batch_size]. */
@Namespace("tensorflow::ops") @NoOffset public static class RecordInput extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public RecordInput(Pointer p) { super(p); }

    /** Optional attribute setters for RecordInput */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Random seeds used to produce randomized records.
         *
         *  Defaults to 301 */
        ///
        public native @ByVal Attrs FileRandomSeed(@Cast("tensorflow::int64") long x);

        /** Shifts the list of files after the list is randomly
         *  shuffled.
         *
         *  Defaults to 0 */
        ///
        public native @ByVal Attrs FileShuffleShiftRatio(float x);

        /** The randomization shuffling buffer.
         *
         *  Defaults to 10000 */
        ///
        public native @ByVal Attrs FileBufferSize(@Cast("tensorflow::int64") long x);

        /** How many sstables are opened and concurrently iterated over.
         *
         *  Defaults to 16 */
        ///
        public native @ByVal Attrs FileParallelism(@Cast("tensorflow::int64") long x);

        /** The batch size.
         *
         *  Defaults to 32 */
        ///
        public native @ByVal Attrs BatchSize(@Cast("tensorflow::int64") long x);

        /** The type of compression for the file. Currently ZLIB and
         *  GZIP are supported. Defaults to none.
         *
         *  Defaults to "" */
        public native @ByVal Attrs CompressionType(@StringPiece BytePointer x);
        public native @ByVal Attrs CompressionType(@StringPiece String x);

        public native @Cast("tensorflow::int64") long file_random_seed_(); public native Attrs file_random_seed_(long file_random_seed_);
        public native float file_shuffle_shift_ratio_(); public native Attrs file_shuffle_shift_ratio_(float file_shuffle_shift_ratio_);
        public native @Cast("tensorflow::int64") long file_buffer_size_(); public native Attrs file_buffer_size_(long file_buffer_size_);
        public native @Cast("tensorflow::int64") long file_parallelism_(); public native Attrs file_parallelism_(long file_parallelism_);
        public native @Cast("tensorflow::int64") long batch_size_(); public native Attrs batch_size_(long batch_size_);
        public native @StringPiece BytePointer compression_type_(); public native Attrs compression_type_(BytePointer compression_type_);
    }

    public RecordInput(@Const @ByRef Scope scope, @StringPiece BytePointer file_pattern) { super((Pointer)null); allocate(scope, file_pattern); }
    private native void allocate(@Const @ByRef Scope scope, @StringPiece BytePointer file_pattern);
    public RecordInput(@Const @ByRef Scope scope, @StringPiece String file_pattern) { super((Pointer)null); allocate(scope, file_pattern); }
    private native void allocate(@Const @ByRef Scope scope, @StringPiece String file_pattern);
    public RecordInput(@Const @ByRef Scope scope, @StringPiece BytePointer file_pattern, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, file_pattern, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @StringPiece BytePointer file_pattern, @Const @ByRef Attrs attrs);
    public RecordInput(@Const @ByRef Scope scope, @StringPiece String file_pattern, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, file_pattern, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @StringPiece String file_pattern, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs FileRandomSeed(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs FileShuffleShiftRatio(float x);
    public static native @ByVal Attrs FileBufferSize(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs FileParallelism(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs BatchSize(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs CompressionType(@StringPiece BytePointer x);
    public static native @ByVal Attrs CompressionType(@StringPiece String x);

    public native @ByRef Operation operation(); public native RecordInput operation(Operation operation);
    public native @ByRef Output records(); public native RecordInput records(Output records);
}
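// Illustrative sketch (hand-written): emits batches of randomized records from files
// matching a glob pattern. The pattern string here is a placeholder.
public static Output recordInputSketch(Scope scope) {
    RecordInput records = new RecordInput(scope, "data/part-*",
            RecordInput.BatchSize(64)             // 64 records per output tensor
                       .FileBufferSize(10000)     // shuffling buffer, as documented above
                       .CompressionType("ZLIB")); // "" (default), "ZLIB", or "GZIP"
    return records.records(); // string tensor of shape [batch_size]
}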
/** Applies a sparse gradient to a given accumulator.
 *
 *  Does not add if local_step is smaller than the accumulator's
 *  global_step.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to an accumulator.
 *  * local_step: The local_step value at which the sparse gradient was computed.
 *  * gradient_indices: Indices of the sparse gradient to be accumulated. Must be a
 *  vector.
 *  * gradient_values: Values are the non-zero slices of the gradient, and must have
 *  the same first dimension as indices, i.e., the nnz represented by indices and
 *  values must be consistent.
 *  * gradient_shape: Shape of the sparse gradient to be accumulated.
 *  * has_known_shape: Boolean indicating whether gradient_shape is unknown, in which
 *  case the input is ignored during validation.
 *
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class SparseAccumulatorApplyGradient extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseAccumulatorApplyGradient(Pointer p) { super(p); }

    public SparseAccumulatorApplyGradient(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input local_step, @ByVal Input gradient_indices, @ByVal Input gradient_values, @ByVal Input gradient_shape, @Cast("bool") boolean has_known_shape) { super((Pointer)null); allocate(scope, handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input local_step, @ByVal Input gradient_indices, @ByVal Input gradient_values, @ByVal Input gradient_shape, @Cast("bool") boolean has_known_shape);
    public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation();

    public native @ByRef Operation operation(); public native SparseAccumulatorApplyGradient operation(Operation operation);
}

/** Extracts the average sparse gradient in a SparseConditionalAccumulator.
 *
 *  The op blocks until sufficient (i.e., more than num_required)
 *  gradients have been accumulated. If the accumulator has already
 *  aggregated more than num_required gradients, it will return its
 *  average of the accumulated gradients. Also automatically increments
 *  the recorded global_step in the accumulator by 1, and resets the
 *  aggregate to 0.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * handle: The handle to a SparseConditionalAccumulator.
 *  * num_required: Number of gradients required before we return an aggregate.
 *  * dtype: The data type of accumulated gradients.
 *  Needs to correspond to the type of the accumulator.
 *
 *  Returns:
 *  * {@code Output} indices: Indices of the average of the accumulated sparse gradients.
 *  * {@code Output} values: Values of the average of the accumulated sparse gradients.
 *  * {@code Output} shape: Shape of the average of the accumulated sparse gradients. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseAccumulatorTakeGradient extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseAccumulatorTakeGradient(Pointer p) { super(p); }

    public SparseAccumulatorTakeGradient(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_required, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, handle, num_required, dtype); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input num_required, @Cast("tensorflow::DataType") int dtype);

    public native @ByRef Operation operation(); public native SparseAccumulatorTakeGradient operation(Operation operation);
    public native @ByRef Output indices(); public native SparseAccumulatorTakeGradient indices(Output indices);
    public native @ByRef Output values(); public native SparseAccumulatorTakeGradient values(Output values);
    public native @ByRef Output shape(); public native SparseAccumulatorTakeGradient shape(Output shape);
}
* * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this accumulator will be shared under the given name * across multiple sessions. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); /** Defaults to "MEAN" */ public native @ByVal Attrs ReductionType(@StringPiece BytePointer x); public native @ByVal Attrs ReductionType(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); public native @StringPiece BytePointer reduction_type_(); public native Attrs reduction_type_(BytePointer reduction_type_); } public SparseConditionalAccumulator(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape) { super((Pointer)null); allocate(scope, dtype, shape); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape); public SparseConditionalAccumulator(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtype, shape, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::DataType") int dtype, @ByVal PartialTensorShape shape, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public static native @ByVal Attrs ReductionType(@StringPiece BytePointer x); public static native @ByVal Attrs ReductionType(@StringPiece String x); public native @ByRef Operation operation(); public native SparseConditionalAccumulator operation(Operation operation); public native @ByRef Output handle(); public native SparseConditionalAccumulator handle(Output handle); } /** Stage values similar to a lightweight Enqueue. * * The basic functionality of this Op is similar to a queue with many * fewer capabilities and options. This Op is optimized for performance. * * Arguments: * * scope: A Scope object * * values: a list of tensors * dtypes A list of data types that inserted values should adhere to. * * Optional attributes (see {@code Attrs}): * * capacity: Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * * memory_limit: The maximum number of bytes allowed for Tensors in the Staging Area. * If > 0, inserts will block until sufficient space is available. * * container: If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. * * shared_name: It is necessary to match this name to the matching Unstage Op. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class Stage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. 
/** Stage values similar to a lightweight Enqueue.
 *
 *  The basic functionality of this Op is similar to a queue with many
 *  fewer capabilities and options. This Op is optimized for performance.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * values: a list of tensors
 *  * dtypes: A list of data types that inserted values should adhere to.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * capacity: Maximum number of elements in the Staging Area. If > 0, inserts
 *  on the container will block when the capacity is reached.
 *  * memory_limit: The maximum number of bytes allowed for Tensors in the Staging Area.
 *  If > 0, inserts will block until sufficient space is available.
 *  * container: If non-empty, this queue is placed in the given container. Otherwise,
 *  a default container is used.
 *  * shared_name: It is necessary to match this name to the matching Unstage Op.
 *
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class Stage extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Stage(Pointer p) { super(p); }

    /** Optional attribute setters for Stage */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Maximum number of elements in the Staging Area. If > 0, inserts
         *  on the container will block when the capacity is reached.
         *
         *  Defaults to 0 */
        ///
        public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);

        /** The maximum number of bytes allowed for Tensors in the Staging Area.
         *  If > 0, inserts will block until sufficient space is available.
         *
         *  Defaults to 0 */
        ///
        public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);

        /** If non-empty, this queue is placed in the given container. Otherwise,
         *  a default container is used.
         *
         *  Defaults to "" */
        ///
        public native @ByVal Attrs Container(@StringPiece BytePointer x);
        public native @ByVal Attrs Container(@StringPiece String x);

        /** It is necessary to match this name to the matching Unstage Op.
         *
         *  Defaults to "" */
        public native @ByVal Attrs SharedName(@StringPiece BytePointer x);
        public native @ByVal Attrs SharedName(@StringPiece String x);

        public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_);
        public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_);
        public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_);
        public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_);
    }

    public Stage(@Const @ByRef Scope scope, @ByVal InputList values) { super((Pointer)null); allocate(scope, values); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal InputList values);
    public Stage(@Const @ByRef Scope scope, @ByVal InputList values, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, values, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal InputList values, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation();

    public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs Container(@StringPiece BytePointer x);
    public static native @ByVal Attrs Container(@StringPiece String x);
    public static native @ByVal Attrs SharedName(@StringPiece BytePointer x);
    public static native @ByVal Attrs SharedName(@StringPiece String x);

    public native @ByRef Operation operation(); public native Stage operation(Operation operation);
}
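// Illustrative sketch (hand-written): stages a tuple of tensors for a later Unstage op.
// The shared_name attribute is what pairs a Stage with its matching Unstage.
public static Operation stageSketch(Scope scope, InputList values) {
    Stage stage = new Stage(scope, values,
            Stage.Capacity(4)                   // block inserts once 4 elements are buffered
                 .SharedName("prefetch_area")); // hypothetical name; must match the Unstage op
    return stage.asOperation();
}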
/** Op removes all elements in the underlying container.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * the created {@code Operation} */
@Namespace("tensorflow::ops") @NoOffset public static class StageClear extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StageClear(Pointer p) { super(p); }

    /** Optional attribute setters for StageClear */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to 0 */
        public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);

        /** Defaults to 0 */
        public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);

        /** Defaults to "" */
        public native @ByVal Attrs Container(@StringPiece BytePointer x);
        public native @ByVal Attrs Container(@StringPiece String x);

        /** Defaults to "" */
        public native @ByVal Attrs SharedName(@StringPiece BytePointer x);
        public native @ByVal Attrs SharedName(@StringPiece String x);

        public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_);
        public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_);
        public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_);
        public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_);
    }

    public StageClear(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); }
    private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes);
    public StageClear(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation();

    public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs Container(@StringPiece BytePointer x);
    public static native @ByVal Attrs Container(@StringPiece String x);
    public static native @ByVal Attrs SharedName(@StringPiece BytePointer x);
    public static native @ByVal Attrs SharedName(@StringPiece String x);

    public native @ByRef Operation operation(); public native StageClear operation(Operation operation);
}
/** Op peeks at the values at the specified index.
 *
 *  If the underlying container does not contain sufficient elements
 *  this op will block until it does. This Op is optimized for
 *  performance.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code OutputList}: The values tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class StagePeek extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StagePeek(Pointer p) { super(p); }

    /** Optional attribute setters for StagePeek */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to 0 */
        public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);

        /** Defaults to 0 */
        public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);

        /** Defaults to "" */
        public native @ByVal Attrs Container(@StringPiece BytePointer x);
        public native @ByVal Attrs Container(@StringPiece String x);

        /** Defaults to "" */
        public native @ByVal Attrs SharedName(@StringPiece BytePointer x);
        public native @ByVal Attrs SharedName(@StringPiece String x);

        public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_);
        public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_);
        public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_);
        public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_);
    }

    public StagePeek(@Const @ByRef Scope scope, @ByVal Input index, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, index, dtypes); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input index, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes);
    public StagePeek(@Const @ByRef Scope scope, @ByVal Input index, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, index, dtypes, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input index, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index);

    public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs Container(@StringPiece BytePointer x);
    public static native @ByVal Attrs Container(@StringPiece String x);
    public static native @ByVal Attrs SharedName(@StringPiece BytePointer x);
    public static native @ByVal Attrs SharedName(@StringPiece String x);

    public native @ByRef Operation operation(); public native StagePeek operation(Operation operation);
    public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native StagePeek values(OutputVector values);
}
/** Op returns the number of elements in the underlying container.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The size tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class StageSize extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public StageSize(Pointer p) { super(p); }

    /** Optional attribute setters for StageSize */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to 0 */
        public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);

        /** Defaults to 0 */
        public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);

        /** Defaults to "" */
        public native @ByVal Attrs Container(@StringPiece BytePointer x);
        public native @ByVal Attrs Container(@StringPiece String x);

        /** Defaults to "" */
        public native @ByVal Attrs SharedName(@StringPiece BytePointer x);
        public native @ByVal Attrs SharedName(@StringPiece String x);

        public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_);
        public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_);
        public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_);
        public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_);
    }

    public StageSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); }
    private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes);
    public StageSize(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs Container(@StringPiece BytePointer x);
    public static native @ByVal Attrs Container(@StringPiece String x);
    public static native @ByVal Attrs SharedName(@StringPiece BytePointer x);
    public static native @ByVal Attrs SharedName(@StringPiece String x);

    public native @ByRef Operation operation(); public native StageSize operation(Operation operation);
    public native @ByRef Output size(); public native StageSize size(Output size);
}
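// Illustrative sketch (hand-written): non-destructive reads on a staging area. StagePeek
// inspects the element at an index and StageSize reports the element count; the dtypes
// list must describe the staged tuples.
public static Output stagingStatusSketch(Scope scope, Input index, DataTypeVector dtypes) {
    Output peeked = new StagePeek(scope, index, dtypes).get(0); // element at `index`, not removed
    Output size = new StageSize(scope, dtypes).size();          // element count as a scalar
    return size; // `peeked` would feed downstream ops; new StageClear(scope, dtypes) empties the area
}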
Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayClose(Pointer p) { super(p); } public TensorArrayClose(@Const @ByRef Scope scope, @ByVal Input handle) { super((Pointer)null); allocate(scope, handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native TensorArrayClose operation(Operation operation); } /** Concat the elements from the TensorArray into value {@code value}. * * Takes {@code T} elements of shapes * *
 * <pre>{@code
 *    (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
 *    }</pre>
 * * and concatenates them into a Tensor of shape: * *
 * <pre>{@code ((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)}</pre>
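 * * A minimal usage sketch (illustrative only, not part of the generated bindings; it assumes an existing {@code Scope} named {@code scope}, a {@code TensorArray} {@code ta} that has already been written to, a latest flow scalar {@code flow}, and that {@code Input} exposes its converting constructor from {@code Output}):
 * <pre>{@code
 * TensorArrayConcat concat =
 *     new TensorArrayConcat(scope, new Input(ta.handle()), new Input(flow), DT_FLOAT);
 * Output all = concat.value();        // all elements joined along axis 0
 * Output rowSizes = concat.lengths(); // first dimension of each original element
 * }</pre>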
* * All elements must have the same shape (except for the first dimension). * * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * dtype: The type of the elem that is returned. * * Optional attributes (see {@code Attrs}): * * element_shape_except0: The expected shape of an element, if known, * excluding the first dimension. Used to validate the shapes of * TensorArray elements. If this shape is not fully specified, concatenating * zero-size TensorArrays is an error. * * Returns: * * {@code Output} value: All of the elements in the TensorArray, concatenated along the first * axis. * * {@code Output} lengths: A vector of the row sizes of the original T elements in the * value output. In the example above, this would be the values: * {@code (n0, n1, ..., n(T-1))}. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayConcat extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayConcat(Pointer p) { super(p); } /** Optional attribute setters for TensorArrayConcat */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The expected shape of an element, if known, * excluding the first dimension. Used to validate the shapes of * TensorArray elements. If this shape is not fully specified, concatenating * zero-size TensorArrays is an error. * * Defaults to */ public native @ByVal Attrs ElementShapeExcept0(@ByVal PartialTensorShape x); public native @ByRef PartialTensorShape element_shape_except0_(); public native Attrs element_shape_except0_(PartialTensorShape element_shape_except0_); } public TensorArrayConcat(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, handle, flow_in, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype); public TensorArrayConcat(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, flow_in, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public static native @ByVal Attrs ElementShapeExcept0(@ByVal PartialTensorShape x); public native @ByRef Operation operation(); public native TensorArrayConcat operation(Operation operation); public native @ByRef Output value(); public native TensorArrayConcat value(Output value); public native @ByRef Output lengths(); public native TensorArrayConcat lengths(Output lengths); } /** Gather specific elements from the TensorArray into output {@code value}. * * All elements selected by {@code indices} must have the same shape. 
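 * * For example (a sketch only, not generated output; {@code scope}, a float {@code TensorArray} {@code ta}, its current flow scalar {@code flow}, and an int32 {@code indices} input are assumed, as is the {@code Input(Output)} converting constructor):
 * <pre>{@code
 * TensorArrayGather gather = new TensorArrayGather(
 *     scope, new Input(ta.handle()), indices, new Input(flow), DT_FLOAT);
 * Output stacked = gather.value(); // selected elements stacked along a new axis 0
 * }</pre>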
* * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray. * * indices: The locations in the TensorArray from which to read tensor elements. * * flow_in: A float scalar that enforces proper chaining of operations. * * dtype: The type of the elem that is returned. * * Optional attributes (see {@code Attrs}): * * element_shape: The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. * * Returns: * * {@code Output}: All of the elements in the TensorArray, concatenated along a new * axis (the new dimension 0). */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayGather extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayGather(Pointer p) { super(p); } /** Optional attribute setters for TensorArrayGather */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. * * Defaults to */ public native @ByVal Attrs ElementShape(@ByVal PartialTensorShape x); public native @ByRef PartialTensorShape element_shape_(); public native Attrs element_shape_(PartialTensorShape element_shape_); } public TensorArrayGather(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input indices, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, handle, indices, flow_in, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input indices, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype); public TensorArrayGather(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input indices, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, handle, indices, flow_in, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input indices, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs ElementShape(@ByVal PartialTensorShape x); public native @ByRef Operation operation(); public native TensorArrayGather operation(Operation operation); public native @ByRef Output value(); public native TensorArrayGather value(Output value); } /** Creates a TensorArray for storing the gradients of values in the given handle. * * If the given TensorArray gradient already exists, returns a reference to it. * * Locks the size of the original TensorArray by disabling its dynamic size flag. 
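 * * As a sketch (assumptions only: a valid {@code scope}, a forward {@code TensorArray} {@code ta}, the flow scalar {@code flow} produced by the last write, and the {@code Input(Output)} converting constructor):
 * <pre>{@code
 * // Create or look up the accumulator for the gradient call named "gradients".
 * TensorArrayGrad grad =
 *     new TensorArrayGrad(scope, new Input(ta.handle()), new Input(flow), "gradients");
 * Output gradHandle = grad.grad_handle();
 * Output gradFlow = grad.flow_out();
 * }</pre>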
* * **A note about the input flow_in:** * * The handle flow_in forces the execution of the gradient lookup to occur * only after certain other operations have occurred. For example, when * the forward TensorArray is dynamically sized, writes to this TensorArray * may resize the object. The gradient TensorArray is statically sized based * on the size of the forward TensorArray when this operation executes. * Furthermore, the size of the forward TensorArray is frozen by this call. * As a result, the flow is used to ensure that the call to generate the gradient * TensorArray only happens after all writes are executed. * * In the case of dynamically sized TensorArrays, gradient computation should * only be performed on read operations that have themselves been chained via * flow to occur only after all writes have executed. That way the final size * of the forward TensorArray is known when this operation is called. * * **A note about the source attribute:** * * TensorArray gradient calls use an accumulator TensorArray object. If * multiple gradients are calculated and run in the same session, the multiple * gradient nodes may accidentally flow through the same accumulator TensorArray. * This double counts and generally breaks the TensorArray gradient flow. * * The solution is to identify which gradient call this particular * TensorArray gradient is being called in. This is performed by identifying * a unique string (e.g. "gradients", "gradients_1", ...) from the input * gradient Tensor's name. This string is used as a suffix when creating * the TensorArray gradient object here (the attribute {@code source}). * * The attribute {@code source} is added as a suffix to the forward TensorArray's * name when performing the creation / lookup, so that each separate gradient * calculation gets its own TensorArray accumulator. * * Arguments: * * scope: A Scope object * * handle: The handle to the forward TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * source: The gradient source string, used to decide which gradient TensorArray * to return. * * Returns: * * {@code Output} grad_handle * * {@code Output} flow_out */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayGrad(Pointer p) { super(p); } public TensorArrayGrad(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @StringPiece BytePointer source) { super((Pointer)null); allocate(scope, handle, flow_in, source); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @StringPiece BytePointer source); public TensorArrayGrad(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @StringPiece String source) { super((Pointer)null); allocate(scope, handle, flow_in, source); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @StringPiece String source); public native @ByRef Operation operation(); public native TensorArrayGrad operation(Operation operation); public native @ByRef Output grad_handle(); public native TensorArrayGrad grad_handle(Output grad_handle); public native @ByRef Output flow_out(); public native TensorArrayGrad flow_out(Output flow_out); } /** Creates a TensorArray for storing multiple gradients of values in the given handle. * * Similar to TensorArrayGradV3. 
However, it creates an accumulator with an * expanded shape compared to the input TensorArray whose gradient is being * computed. This enables multiple gradients for the same TensorArray to be * calculated using the same accumulator. * * Arguments: * * scope: A Scope object * * handle: The handle to the forward TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will * have a shape which is this shape_to_prepend value concatenated with the shape of the * elements in the TensorArray corresponding to the input handle. * * source: The gradient source string, used to decide which gradient TensorArray * to return. * * Returns: * * {@code Output} grad_handle * * {@code Output} flow_out */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayGradWithShape extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayGradWithShape(Pointer p) { super(p); } public TensorArrayGradWithShape(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @ByVal Input shape_to_prepend, @StringPiece BytePointer source) { super((Pointer)null); allocate(scope, handle, flow_in, shape_to_prepend, source); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @ByVal Input shape_to_prepend, @StringPiece BytePointer source); public TensorArrayGradWithShape(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @ByVal Input shape_to_prepend, @StringPiece String source) { super((Pointer)null); allocate(scope, handle, flow_in, shape_to_prepend, source); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in, @ByVal Input shape_to_prepend, @StringPiece String source); public native @ByRef Operation operation(); public native TensorArrayGradWithShape operation(Operation operation); public native @ByRef Output grad_handle(); public native TensorArrayGradWithShape grad_handle(Output grad_handle); public native @ByRef Output flow_out(); public native TensorArrayGradWithShape flow_out(Output flow_out); } /** Read an element from the TensorArray into output {@code value}. * * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * dtype: The type of the elem that is returned. * * Returns: * * {@code Output}: The tensor that is read from the TensorArray. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayRead extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TensorArrayRead(Pointer p) { super(p); } public TensorArrayRead(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input index, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, handle, index, flow_in, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input index, @ByVal Input flow_in, @Cast("tensorflow::DataType") int dtype); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native TensorArrayRead operation(Operation operation); public native @ByRef Output value(); public native TensorArrayRead value(Output value); } /** Scatter the data from the input value into specific TensorArray elements. * * {@code indices} must be a vector, its length must match the first dim of {@code value}. * * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray. * * indices: The locations at which to write the tensor elements. * * value: The concatenated tensor to write to the TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * Returns: * * {@code Output}: A float scalar that enforces proper chaining of operations. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayScatter extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayScatter(Pointer p) { super(p); } public TensorArrayScatter(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input indices, @ByVal Input value, @ByVal Input flow_in) { super((Pointer)null); allocate(scope, handle, indices, value, flow_in); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input indices, @ByVal Input value, @ByVal Input flow_in); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native TensorArrayScatter operation(Operation operation); public native @ByRef Output flow_out(); public native TensorArrayScatter flow_out(Output flow_out); } /** Get the current size of the TensorArray. * * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). * * flow_in: A float scalar that enforces proper chaining of operations. * * Returns: * * {@code Output}: The current size of the TensorArray. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArraySize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TensorArraySize(Pointer p) { super(p); } public TensorArraySize(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in) { super((Pointer)null); allocate(scope, handle, flow_in); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input flow_in); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native TensorArraySize operation(Operation operation); public native @ByRef Output size(); public native TensorArraySize size(Output size); } /** Split the data from the input value into TensorArray elements. * * Assuming that {@code lengths} takes on values * *
 * <pre>{@code (n0, n1, ..., n(T-1))}</pre>
 * * and that {@code value} has shape * *
 * <pre>{@code ((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)}</pre>,
 * * this splits values into a TensorArray with T tensors. * * TensorArray index t will be the subtensor of values with starting position * *
 * <pre>{@code (n0 + n1 + ... + n(t-1), 0, 0, ...)}</pre>
 * * and having size * *
 * <pre>{@code nt x d0 x d1 x ...}</pre>
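 * * A usage sketch (illustrative, with assumed inputs: {@code scope}, a {@code TensorArray} {@code ta} of size T, a {@code value} input whose first dimension is n0 + n1 + ... + n(T-1), and a {@code lengths} vector holding those row counts; the {@code Input(Output)} converting constructor is also assumed):
 * <pre>{@code
 * TensorArraySplit split = new TensorArraySplit(
 *     scope, new Input(ta.handle()), value, lengths, new Input(ta.flow()));
 * Output flow = split.flow_out(); // thread this into subsequent TensorArray ops
 * }</pre>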
* * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray. * * value: The concatenated tensor to write to the TensorArray. * * lengths: The vector of lengths specifying how to split the rows of value into the * TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * Returns: * * {@code Output}: A float scalar that enforces proper chaining of operations. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArraySplit extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArraySplit(Pointer p) { super(p); } public TensorArraySplit(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input value, @ByVal Input lengths, @ByVal Input flow_in) { super((Pointer)null); allocate(scope, handle, value, lengths, flow_in); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input value, @ByVal Input lengths, @ByVal Input flow_in); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native TensorArraySplit operation(Operation operation); public native @ByRef Output flow_out(); public native TensorArraySplit flow_out(Output flow_out); } /** An array of Tensors of given size. * * Write data via Write and read via Read or Pack. * * Arguments: * * scope: A Scope object * * size: The size of the array. * * dtype: The type of the elements on the tensor_array. * * Optional attributes (see {@code Attrs}): * * element_shape: The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. * * dynamic_size: A boolean that determines whether writes to the TensorArray * are allowed to grow the size. By default, this is not allowed. * * clear_after_read: If true (default), Tensors in the TensorArray are cleared * after being read. This disables multiple read semantics but allows early * release of memory. * * identical_element_shapes: If true (default is false), then all * elements in the TensorArray will be expected to have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute * is not fully defined. * * tensor_array_name: Overrides the name used for the temporary tensor_array * resource. Default value is the name of the 'TensorArray' op (which * is guaranteed unique). * * Returns: * * {@code Output} handle: The handle to the TensorArray. * * {@code Output} flow: A scalar used to control gradient flow. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArray extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArray(Pointer p) { super(p); } /** Optional attribute setters for TensorArray */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. * * Defaults to */ /// public native @ByVal Attrs ElementShape(@ByVal PartialTensorShape x); /** A boolean that determines whether writes to the TensorArray * are allowed to grow the size. By default, this is not allowed. * * Defaults to false */ /// public native @ByVal Attrs DynamicSize(@Cast("bool") boolean x); /** If true (default), Tensors in the TensorArray are cleared * after being read. This disables multiple read semantics but allows early * release of memory. * * Defaults to true */ /// public native @ByVal Attrs ClearAfterRead(@Cast("bool") boolean x); /** If true (default is false), then all * elements in the TensorArray will be expected to have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute * is not fully defined. * * Defaults to false */ /// public native @ByVal Attrs IdenticalElementShapes(@Cast("bool") boolean x); /** Overrides the name used for the temporary tensor_array * resource. Default value is the name of the 'TensorArray' op (which * is guaranteed unique). * * Defaults to "" */ public native @ByVal Attrs TensorArrayName(@StringPiece BytePointer x); public native @ByVal Attrs TensorArrayName(@StringPiece String x); public native @ByRef PartialTensorShape element_shape_(); public native Attrs element_shape_(PartialTensorShape element_shape_); public native @Cast("bool") boolean dynamic_size_(); public native Attrs dynamic_size_(boolean dynamic_size_); public native @Cast("bool") boolean clear_after_read_(); public native Attrs clear_after_read_(boolean clear_after_read_); public native @Cast("bool") boolean identical_element_shapes_(); public native Attrs identical_element_shapes_(boolean identical_element_shapes_); public native @StringPiece BytePointer tensor_array_name_(); public native Attrs tensor_array_name_(BytePointer tensor_array_name_); } public TensorArray(@Const @ByRef Scope scope, @ByVal Input size, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, size, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input size, @Cast("tensorflow::DataType") int dtype); public TensorArray(@Const @ByRef Scope scope, @ByVal Input size, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, size, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input size, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public static native @ByVal Attrs ElementShape(@ByVal PartialTensorShape x); public static native @ByVal Attrs DynamicSize(@Cast("bool") boolean x); public static native @ByVal Attrs ClearAfterRead(@Cast("bool") boolean x); public static native @ByVal Attrs IdenticalElementShapes(@Cast("bool") boolean x); public static native @ByVal Attrs TensorArrayName(@StringPiece BytePointer x); public static native @ByVal Attrs TensorArrayName(@StringPiece String x); public native @ByRef Operation operation(); public native TensorArray operation(Operation operation); 
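    // Usage sketch (illustrative comment only, not generated code): a typical
    // create/write/read round trip, assuming a valid Scope `scope`, constant
    // Inputs `sizeIn`, `indexIn`, `valueIn`, and the Input(Output) constructor:
    //
    //   TensorArray ta = new TensorArray(scope, sizeIn, DT_FLOAT);
    //   TensorArrayWrite w = new TensorArrayWrite(scope, new Input(ta.handle()),
    //       indexIn, valueIn, new Input(ta.flow()));
    //   TensorArrayRead r = new TensorArrayRead(scope, new Input(ta.handle()),
    //       indexIn, new Input(w.flow_out()), DT_FLOAT);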
public native @ByRef Output handle(); public native TensorArray handle(Output handle); public native @ByRef Output flow(); public native TensorArray flow(Output flow); } /** Push an element onto the tensor_array. * * Arguments: * * scope: A Scope object * * handle: The handle to a TensorArray. * * index: The position to write to inside the TensorArray. * * value: The tensor to write to the TensorArray. * * flow_in: A float scalar that enforces proper chaining of operations. * * Returns: * * {@code Output}: A float scalar that enforces proper chaining of operations. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorArrayWrite extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorArrayWrite(Pointer p) { super(p); } public TensorArrayWrite(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input index, @ByVal Input value, @ByVal Input flow_in) { super((Pointer)null); allocate(scope, handle, index, value, flow_in); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input handle, @ByVal Input index, @ByVal Input value, @ByVal Input flow_in); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native TensorArrayWrite operation(Operation operation); public native @ByRef Output flow_out(); public native TensorArrayWrite flow_out(Output flow_out); } /** Op is similar to a lightweight Dequeue. * * The basic functionality is similar to dequeue with many fewer * capabilities and options. This Op is optimized for performance. * * Arguments: * * scope: A Scope object * * Returns: * * {@code OutputList}: The values tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Unstage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Unstage(Pointer p) { super(p); } /** Optional attribute setters for Unstage */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); /** Defaults to "" */ public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long capacity_(); public native Attrs capacity_(long capacity_); public native @Cast("tensorflow::int64") long memory_limit_(); public native Attrs memory_limit_(long memory_limit_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public Unstage(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, dtypes); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public Unstage(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, dtypes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public static native @ByVal Attrs Capacity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MemoryLimit(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native Unstage operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector values(); public native Unstage values(OutputVector values); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_DATA_FLOW_OPS_H_ // Parsed from tensorflow/cc/ops/image_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_IMAGE_OPS_H_ // #define TENSORFLOW_CC_OPS_IMAGE_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup image_ops Image Ops * \{

* Adjust the contrast of one or more images. * * {@code images} is a tensor of at least 3 dimensions. The last 3 dimensions are * interpreted as {@code [height, width, channels]}. The other dimensions only * represent a collection of images, such as {@code [batch, height, width, channels]}. * * Contrast is adjusted independently for each channel of each image. * * For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to * {@code (x - mean) * contrast_factor + mean}. * * Arguments: * * scope: A Scope object * * images: Images to adjust. At least 3-D. * * contrast_factor: A float multiplier for adjusting contrast. * * Returns: * * {@code Output}: The contrast-adjusted image or images. */ @Namespace("tensorflow::ops") @NoOffset public static class AdjustContrast extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdjustContrast(Pointer p) { super(p); } public AdjustContrast(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input contrast_factor) { super((Pointer)null); allocate(scope, images, contrast_factor); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input contrast_factor); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AdjustContrast operation(Operation operation); public native @ByRef Output output(); public native AdjustContrast output(Output output); } /** Adjust the hue of one or more images. * * {@code images} is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied to all the hue values, * and the values are then remapped back to the RGB colorspace. * * Arguments: * * scope: A Scope object * * images: Images to adjust. At least 3-D. * * delta: A float delta to add to the hue. * * Returns: * * {@code Output}: The hue-adjusted image or images. */ @Namespace("tensorflow::ops") @NoOffset public static class AdjustHue extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdjustHue(Pointer p) { super(p); } public AdjustHue(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input delta) { super((Pointer)null); allocate(scope, images, delta); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input delta); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AdjustHue operation(Operation operation); public native @ByRef Output output(); public native AdjustHue output(Output output); } /** Adjust the saturation of one or more images. * * {@code images} is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied to all the saturation * values, and the values are then remapped back to the RGB colorspace. * * Arguments: * * scope: A Scope object * * images: Images to adjust. At least 3-D. 
* * scale: A float scale to add to the saturation. * * Returns: * * {@code Output}: The saturation-adjusted image or images. */ @Namespace("tensorflow::ops") @NoOffset public static class AdjustSaturation extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdjustSaturation(Pointer p) { super(p); } public AdjustSaturation(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input scale) { super((Pointer)null); allocate(scope, images, scale); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input scale); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AdjustSaturation operation(Operation operation); public native @ByRef Output output(); public native AdjustSaturation output(Output output); } /** Extracts crops from the input image tensor and resizes them. * * Extracts crops from the input image tensor and resizes them using bilinear * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a * common output size specified by {@code crop_size}. This is more general than the * {@code crop_to_bounding_box} op which extracts a fixed size slice from the input image * and does not allow resizing or aspect ratio change. * * Returns a tensor with {@code crops} from the input {@code image} at positions defined by the * bounding box locations in {@code boxes}. The cropped boxes are all resized (with * bilinear or nearest neighbor interpolation) to a fixed * {@code size = [crop_height, crop_width]}. The result is a 4-D tensor * {@code [num_boxes, crop_height, crop_width, depth]}. The resizing is corner aligned. * In particular, if {@code boxes = [[0, 0, 1, 1]]}, the method will give identical * results to using {@code tf.image.resize_bilinear()} or * {@code tf.image.resize_nearest_neighbor()} (depending on the {@code method} argument) with * {@code align_corners=True}. * * Arguments: * * scope: A Scope object * * image: A 4-D tensor of shape {@code [batch, image_height, image_width, depth]}. * Both {@code image_height} and {@code image_width} need to be positive. * * boxes: A 2-D tensor of shape {@code [num_boxes, 4]}. The {@code i}-th row of the tensor * specifies the coordinates of a box in the {@code box_ind[i]} image and is specified * in normalized coordinates {@code [y1, x1, y2, x2]}. A normalized coordinate value of * {@code y} is mapped to the image coordinate at {@code y * (image_height - 1)}, so the * {@code [0, 1]} interval of normalized image height is mapped to * {@code [0, image_height - 1]} in image height coordinates. We do allow {@code y1} > {@code y2}, in * which case the sampled crop is an up-down flipped version of the original * image. The width dimension is treated similarly. Normalized coordinates * outside the {@code [0, 1]} range are allowed, in which case we use * {@code extrapolation_value} to extrapolate the input image values. * * box_ind: A 1-D tensor of shape {@code [num_boxes]} with int32 values in {@code [0, batch)}. * The value of {@code box_ind[i]} specifies the image that the {@code i}-th box refers to. * * crop_size: A 1-D tensor of 2 elements, {@code size = [crop_height, crop_width]}. All * cropped image patches are resized to this size. The aspect ratio of the image * content is not preserved. 
Both {@code crop_height} and {@code crop_width} need to be * positive. * * Optional attributes (see {@code Attrs}): * * method: A string specifying the sampling method for resizing. It can be either * {@code "bilinear"} or {@code "nearest"} and defaults to {@code "bilinear"}. Currently two sampling * methods are supported: Bilinear and Nearest Neighbor. * * extrapolation_value: Value used for extrapolation, when applicable. * * Returns: * * {@code Output}: A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. */ @Namespace("tensorflow::ops") @NoOffset public static class CropAndResize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CropAndResize(Pointer p) { super(p); } /** Optional attribute setters for CropAndResize */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string specifying the sampling method for resizing. It can be either * {@code "bilinear"} or {@code "nearest"} and defaults to {@code "bilinear"}. Currently two sampling * methods are supported: Bilinear and Nearest Neighbor. * * Defaults to "bilinear" */ /// public native @ByVal Attrs Method(@StringPiece BytePointer x); public native @ByVal Attrs Method(@StringPiece String x); /** Value used for extrapolation, when applicable. 
* * Defaults to 0 */ public native @ByVal Attrs ExtrapolationValue(float x); public native @StringPiece BytePointer method_(); public native Attrs method_(BytePointer method_); public native float extrapolation_value_(); public native Attrs extrapolation_value_(float extrapolation_value_); } public CropAndResize(@Const @ByRef Scope scope, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input crop_size) { super((Pointer)null); allocate(scope, image, boxes, box_ind, crop_size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input crop_size); public CropAndResize(@Const @ByRef Scope scope, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input crop_size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, image, boxes, box_ind, crop_size, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input crop_size, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Method(@StringPiece BytePointer x); public static native @ByVal Attrs Method(@StringPiece String x); public static native @ByVal Attrs ExtrapolationValue(float x); public native @ByRef Operation operation(); public native CropAndResize operation(Operation operation); public native @ByRef Output crops(); public native CropAndResize crops(Output crops); } /** Computes the gradient of the crop_and_resize op wrt the input boxes tensor. * * Arguments: * * scope: A Scope object * * grads: A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. * * image: A 4-D tensor of shape {@code [batch, image_height, image_width, depth]}. * Both {@code image_height} and {@code image_width} need to be positive. * * boxes: A 2-D tensor of shape {@code [num_boxes, 4]}. The {@code i}-th row of the tensor * specifies the coordinates of a box in the {@code box_ind[i]} image and is specified * in normalized coordinates {@code [y1, x1, y2, x2]}. A normalized coordinate value of * {@code y} is mapped to the image coordinate at {@code y * (image_height - 1)}, so the * {@code [0, 1]} interval of normalized image height is mapped to * {@code [0, image_height - 1]} in image height coordinates. We do allow {@code y1} > {@code y2}, in * which case the sampled crop is an up-down flipped version of the original * image. The width dimension is treated similarly. Normalized coordinates * outside the {@code [0, 1]} range are allowed, in which case we use * {@code extrapolation_value} to extrapolate the input image values. * * box_ind: A 1-D tensor of shape {@code [num_boxes]} with int32 values in {@code [0, batch)}. * The value of {@code box_ind[i]} specifies the image that the {@code i}-th box refers to. * * Optional attributes (see {@code Attrs}): * * method: A string specifying the interpolation method. Only 'bilinear' is * supported for now. * * Returns: * * {@code Output}: A 2-D tensor of shape {@code [num_boxes, 4]}. */ @Namespace("tensorflow::ops") @NoOffset public static class CropAndResizeGradBoxes extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public CropAndResizeGradBoxes(Pointer p) { super(p); } /** Optional attribute setters for CropAndResizeGradBoxes */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string specifying the interpolation method. Only 'bilinear' is * supported for now. * * Defaults to "bilinear" */ public native @ByVal Attrs Method(@StringPiece BytePointer x); public native @ByVal Attrs Method(@StringPiece String x); public native @StringPiece BytePointer method_(); public native Attrs method_(BytePointer method_); } public CropAndResizeGradBoxes(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind) { super((Pointer)null); allocate(scope, grads, image, boxes, box_ind); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind); public CropAndResizeGradBoxes(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, grads, image, boxes, box_ind, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input image, @ByVal Input boxes, @ByVal Input box_ind, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Method(@StringPiece BytePointer x); public static native @ByVal Attrs Method(@StringPiece String x); public native @ByRef Operation operation(); public native CropAndResizeGradBoxes operation(Operation operation); public native @ByRef Output output(); public native CropAndResizeGradBoxes output(Output output); } /** Computes the gradient of the crop_and_resize op wrt the input image tensor. * * Arguments: * * scope: A Scope object * * grads: A 4-D tensor of shape {@code [num_boxes, crop_height, crop_width, depth]}. * * boxes: A 2-D tensor of shape {@code [num_boxes, 4]}. The {@code i}-th row of the tensor * specifies the coordinates of a box in the {@code box_ind[i]} image and is specified * in normalized coordinates {@code [y1, x1, y2, x2]}. A normalized coordinate value of * {@code y} is mapped to the image coordinate at {@code y * (image_height - 1)}, so the * {@code [0, 1]} interval of normalized image height is mapped to * {@code [0, image_height - 1]} in image height coordinates. We do allow {@code y1} > {@code y2}, in * which case the sampled crop is an up-down flipped version of the original * image. The width dimension is treated similarly. Normalized coordinates * outside the {@code [0, 1]} range are allowed, in which case we use * {@code extrapolation_value} to extrapolate the input image values. * * box_ind: A 1-D tensor of shape {@code [num_boxes]} with int32 values in {@code [0, batch)}. * The value of {@code box_ind[i]} specifies the image that the {@code i}-th box refers to. 
* * image_size: A 1-D tensor with value {@code [batch, image_height, image_width, depth]} * containing the original image size. Both {@code image_height} and {@code image_width} need * to be positive. * * Optional attributes (see {@code Attrs}): * * method: A string specifying the interpolation method. Only 'bilinear' is * supported for now. * * Returns: * * {@code Output}: A 4-D tensor of shape {@code [batch, image_height, image_width, depth]}. */ @Namespace("tensorflow::ops") @NoOffset public static class CropAndResizeGradImage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CropAndResizeGradImage(Pointer p) { super(p); } /** Optional attribute setters for CropAndResizeGradImage */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string specifying the interpolation method. Only 'bilinear' is * supported for now. * * Defaults to "bilinear" */ public native @ByVal Attrs Method(@StringPiece BytePointer x); public native @ByVal Attrs Method(@StringPiece String x); public native @StringPiece BytePointer method_(); public native Attrs method_(BytePointer method_); } public CropAndResizeGradImage(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input image_size, @Cast("tensorflow::DataType") int T) { super((Pointer)null); allocate(scope, grads, boxes, box_ind, image_size, T); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input image_size, @Cast("tensorflow::DataType") int T); public CropAndResizeGradImage(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input image_size, @Cast("tensorflow::DataType") int T, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, grads, boxes, box_ind, image_size, T, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input grads, @ByVal Input boxes, @ByVal Input box_ind, @ByVal Input image_size, @Cast("tensorflow::DataType") int T, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Method(@StringPiece BytePointer x); public static native @ByVal Attrs Method(@StringPiece String x); public native @ByRef Operation operation(); public native CropAndResizeGradImage operation(Operation operation); public native @ByRef Output output(); public native CropAndResizeGradImage output(Output output); } /** Decode and Crop a JPEG-encoded image to a uint8 tensor. * * The attr {@code channels} indicates the desired number of color channels for the * decoded image. * * Accepted values are: * * * 0: Use the number of channels in the JPEG-encoded image. * * 1: output a grayscale image. * * 3: output an RGB image. 
* * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. * * The attr {@code ratio} allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. * * It is equivalent to a combination of decode and crop, but much faster because it only * decodes the part of the JPEG image within the crop window. * * Arguments: * * scope: A Scope object * * contents: 0-D. The JPEG-encoded image. * * crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. * * Optional attributes (see {@code Attrs}): * * channels: Number of color channels for the decoded image. * * ratio: Downscaling ratio. * * fancy_upscaling: If true use a slower but nicer upscaling of the * chroma planes (yuv420/422 only). * * try_recover_truncated: If true try to recover an image from truncated input. * * acceptable_fraction: The minimum required fraction of lines before a truncated * input is accepted. * * dct_method: string specifying a hint about the algorithm used for * decompression. Defaults to "" which maps to a system-specific * default. Currently valid values are ["INTEGER_FAST", * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) * * Returns: * * {@code Output}: 3-D with shape {@code [height, width, channels]}. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeAndCropJpeg extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeAndCropJpeg(Pointer p) { super(p); } /** Optional attribute setters for DecodeAndCropJpeg */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Number of color channels for the decoded image. * * Defaults to 0 */ /// public native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); /** Downscaling ratio. * * Defaults to 1 */ /// public native @ByVal Attrs Ratio(@Cast("tensorflow::int64") long x); /** If true use a slower but nicer upscaling of the * chroma planes (yuv420/422 only). * * Defaults to true */ /// public native @ByVal Attrs FancyUpscaling(@Cast("bool") boolean x); /** If true try to recover an image from truncated input. * * Defaults to false */ /// public native @ByVal Attrs TryRecoverTruncated(@Cast("bool") boolean x); /** The minimum required fraction of lines before a truncated * input is accepted. * * Defaults to 1 */ /// public native @ByVal Attrs AcceptableFraction(float x); /** string specifying a hint about the algorithm used for * decompression. Defaults to "" which maps to a system-specific * default. Currently valid values are ["INTEGER_FAST", * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) 
* * Defaults to "" */ public native @ByVal Attrs DctMethod(@StringPiece BytePointer x); public native @ByVal Attrs DctMethod(@StringPiece String x); public native @Cast("tensorflow::int64") long channels_(); public native Attrs channels_(long channels_); public native @Cast("tensorflow::int64") long ratio_(); public native Attrs ratio_(long ratio_); public native @Cast("bool") boolean fancy_upscaling_(); public native Attrs fancy_upscaling_(boolean fancy_upscaling_); public native @Cast("bool") boolean try_recover_truncated_(); public native Attrs try_recover_truncated_(boolean try_recover_truncated_); public native float acceptable_fraction_(); public native Attrs acceptable_fraction_(float acceptable_fraction_); public native @StringPiece BytePointer dct_method_(); public native Attrs dct_method_(BytePointer dct_method_); } public DecodeAndCropJpeg(@Const @ByRef Scope scope, @ByVal Input contents, @ByVal Input crop_window) { super((Pointer)null); allocate(scope, contents, crop_window); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents, @ByVal Input crop_window); public DecodeAndCropJpeg(@Const @ByRef Scope scope, @ByVal Input contents, @ByVal Input crop_window, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, contents, crop_window, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents, @ByVal Input crop_window, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Ratio(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs FancyUpscaling(@Cast("bool") boolean x); public static native @ByVal Attrs TryRecoverTruncated(@Cast("bool") boolean x); public static native @ByVal Attrs AcceptableFraction(float x); public static native @ByVal Attrs DctMethod(@StringPiece BytePointer x); public static native @ByVal Attrs DctMethod(@StringPiece String x); public native @ByRef Operation operation(); public native DecodeAndCropJpeg operation(Operation operation); public native @ByRef Output image(); public native DecodeAndCropJpeg image(Output image); } /** Decode the first frame of a BMP-encoded image to a uint8 tensor. * * The attr {@code channels} indicates the desired number of color channels for the * decoded image. * * Accepted values are: * * * 0: Use the number of channels in the BMP-encoded image. * * 3: output an RGB image. * * 4: output an RGBA image. * * Arguments: * * scope: A Scope object * * contents: 0-D. The BMP-encoded image. * * Returns: * * {@code Output}: 3-D with shape {@code [height, width, channels]}. RGB order */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeBmp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeBmp(Pointer p) { super(p); } /** Optional attribute setters for DecodeBmp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long channels_(); public native Attrs channels_(long channels_); } public DecodeBmp(@Const @ByRef Scope scope, @ByVal Input contents) { super((Pointer)null); allocate(scope, contents); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents); public DecodeBmp(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, contents, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native DecodeBmp operation(Operation operation); public native @ByRef Output image(); public native DecodeBmp image(Output image); } /** Decode the first frame of a GIF-encoded image to a uint8 tensor. * * GIFs with frame or transparency compression are not supported; * convert an animated GIF from compressed to uncompressed with: * * convert $src.gif -coalesce $dst.gif * * This op also supports decoding JPEGs and PNGs, though it is cleaner to use * {@code tf.image.decode_image}. * * Arguments: * * scope: A Scope object * * contents: 0-D. The GIF-encoded image. * * Returns: * * {@code Output}: 4-D with shape {@code [num_frames, height, width, 3]}. RGB order */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeGif extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeGif(Pointer p) { super(p); } public DecodeGif(@Const @ByRef Scope scope, @ByVal Input contents) { super((Pointer)null); allocate(scope, contents); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DecodeGif operation(Operation operation); public native @ByRef Output image(); public native DecodeGif image(Output image); } /** Decode a JPEG-encoded image to a uint8 tensor. * * The attr {@code channels} indicates the desired number of color channels for the * decoded image. * * Accepted values are: * * * 0: Use the number of channels in the JPEG-encoded image. * * 1: output a grayscale image. * * 3: output an RGB image. * * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. * * The attr {@code ratio} allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. * * * This op also supports decoding PNGs and non-animated GIFs since the interface is * the same, though it is cleaner to use {@code tf.image.decode_image}. * * Arguments: * * scope: A Scope object * * contents: 0-D. The JPEG-encoded image. * * Optional attributes (see {@code Attrs}): * * channels: Number of color channels for the decoded image. * * ratio: Downscaling ratio. * * fancy_upscaling: If true use a slower but nicer upscaling of the * chroma planes (yuv420/422 only). * * try_recover_truncated: If true try to recover an image from truncated input. * * acceptable_fraction: The minimum required fraction of lines before a truncated * input is accepted. * * dct_method: string specifying a hint about the algorithm used for * decompression. Defaults to "" which maps to a system-specific * default. Currently valid values are ["INTEGER_FAST", * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) * * Returns: * * {@code Output}: 3-D with shape {@code [height, width, channels]}.
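 *
 *  A minimal usage sketch with these bindings (a sketch only: it assumes
 *  {@code Scope.NewRootScope()} as in the TensorFlow C++ API, and
 *  {@code contents} stands for an {@code Input} that already holds the
 *  serialized JPEG bytes):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  // Decode to RGB and downscale by 2 during decoding.
 *  DecodeJpeg decoded = new DecodeJpeg(scope, contents,
 *      new DecodeJpeg.Attrs().Channels(3).Ratio(2));
 *  Output image = decoded.image();  // 3-D uint8, shape [height/2, width/2, 3]
 *  }</pre>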
*/ @Namespace("tensorflow::ops") @NoOffset public static class DecodeJpeg extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeJpeg(Pointer p) { super(p); } /** Optional attribute setters for DecodeJpeg */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Number of color channels for the decoded image. * * Defaults to 0 */ /// public native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); /** Downscaling ratio. * * Defaults to 1 */ /// public native @ByVal Attrs Ratio(@Cast("tensorflow::int64") long x); /** If true use a slower but nicer upscaling of the * chroma planes (yuv420/422 only). * * Defaults to true */ /// public native @ByVal Attrs FancyUpscaling(@Cast("bool") boolean x); /** If true try to recover an image from truncated input. * * Defaults to false */ /// public native @ByVal Attrs TryRecoverTruncated(@Cast("bool") boolean x); /** The minimum required fraction of lines before a truncated * input is accepted. * * Defaults to 1 */ /// public native @ByVal Attrs AcceptableFraction(float x); /** string specifying a hint about the algorithm used for * decompression. Defaults to "" which maps to a system-specific * default. Currently valid values are ["INTEGER_FAST", * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.)
* * Defaults to "" */ public native @ByVal Attrs DctMethod(@StringPiece BytePointer x); public native @ByVal Attrs DctMethod(@StringPiece String x); public native @Cast("tensorflow::int64") long channels_(); public native Attrs channels_(long channels_); public native @Cast("tensorflow::int64") long ratio_(); public native Attrs ratio_(long ratio_); public native @Cast("bool") boolean fancy_upscaling_(); public native Attrs fancy_upscaling_(boolean fancy_upscaling_); public native @Cast("bool") boolean try_recover_truncated_(); public native Attrs try_recover_truncated_(boolean try_recover_truncated_); public native float acceptable_fraction_(); public native Attrs acceptable_fraction_(float acceptable_fraction_); public native @StringPiece BytePointer dct_method_(); public native Attrs dct_method_(BytePointer dct_method_); } public DecodeJpeg(@Const @ByRef Scope scope, @ByVal Input contents) { super((Pointer)null); allocate(scope, contents); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents); public DecodeJpeg(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, contents, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Ratio(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs FancyUpscaling(@Cast("bool") boolean x); public static native @ByVal Attrs TryRecoverTruncated(@Cast("bool") boolean x); public static native @ByVal Attrs AcceptableFraction(float x); public static native @ByVal Attrs DctMethod(@StringPiece BytePointer x); public static native @ByVal Attrs DctMethod(@StringPiece String x); public native @ByRef Operation operation(); public native DecodeJpeg operation(Operation operation); public native @ByRef Output image(); public native DecodeJpeg image(Output image); } /** Decode a PNG-encoded image to a uint8 or uint16 tensor. * * The attr {@code channels} indicates the desired number of color channels for the * decoded image. * * Accepted values are: * * * 0: Use the number of channels in the PNG-encoded image. * * 1: output a grayscale image. * * 3: output an RGB image. * * 4: output an RGBA image. * * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use {@code tf.image.decode_image}. * * Arguments: * * scope: A Scope object * * contents: 0-D. The PNG-encoded image. * * Optional attributes (see {@code Attrs}): * * channels: Number of color channels for the decoded image. * * Returns: * * {@code Output}: 3-D with shape {@code [height, width, channels]}. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodePng extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodePng(Pointer p) { super(p); } /** Optional attribute setters for DecodePng */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Number of color channels for the decoded image. * * Defaults to 0 */ public native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); /** Defaults to DT_UINT8 */ public native @ByVal Attrs Dtype(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::int64") long channels_(); public native Attrs channels_(long channels_); public native @Cast("tensorflow::DataType") int dtype_(); public native Attrs dtype_(int dtype_); } public DecodePng(@Const @ByRef Scope scope, @ByVal Input contents) { super((Pointer)null); allocate(scope, contents); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents); public DecodePng(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, contents, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Channels(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Dtype(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native DecodePng operation(Operation operation); public native @ByRef Output image(); public native DecodePng image(Output image); } /** Draw bounding boxes on a batch of images. * * Outputs a copy of {@code images} but draws on top of the pixels zero or more bounding * boxes specified by the locations in {@code boxes}. The coordinates of each * bounding box in {@code boxes} are encoded as {@code [y_min, x_min, y_max, x_max]}. The * bounding box coordinates are floats in {@code [0.0, 1.0]} relative to the width and * height of the underlying image. * * For example, if an image is 100 x 200 pixels (height x width) and the bounding * box is {@code [0.1, 0.2, 0.5, 0.9]}, the upper-left and bottom-right coordinates of * the bounding box will be {@code (40, 10)} to {@code (180, 50)} (in (x,y) coordinates). * * Parts of the bounding box may fall outside the image. * * Arguments: * * scope: A Scope object * * images: 4-D with shape {@code [batch, height, width, depth]}. A batch of images. * * boxes: 3-D with shape {@code [batch, num_bounding_boxes, 4]} containing bounding * boxes. * * Returns: * * {@code Output}: 4-D with the same shape as {@code images}. The batch of input images with * bounding boxes drawn on the images.
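 *
 *  A minimal sketch with these bindings (assuming {@code Scope.NewRootScope()}
 *  as in the C++ API; {@code images} and {@code boxes} stand for inputs built
 *  elsewhere):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  // images: 4-D float [batch, height, width, depth];
 *  // boxes:  3-D float [batch, num_bounding_boxes, 4], rows of [y_min, x_min, y_max, x_max]
 *  DrawBoundingBoxes drawn = new DrawBoundingBoxes(scope, images, boxes);
 *  Output annotated = drawn.output();  // same shape as images
 *  }</pre>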
*/ @Namespace("tensorflow::ops") @NoOffset public static class DrawBoundingBoxes extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DrawBoundingBoxes(Pointer p) { super(p); } public DrawBoundingBoxes(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input boxes) { super((Pointer)null); allocate(scope, images, boxes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input boxes); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DrawBoundingBoxes operation(Operation operation); public native @ByRef Output output(); public native DrawBoundingBoxes output(Output output); } /** JPEG-encode an image. * * {@code image} is a 3-D uint8 Tensor of shape {@code [height, width, channels]}. * * The attr {@code format} can be used to override the color format of the encoded * output. Values can be: * * * {@code ''}: Use a default format based on the number of channels in the image. * * {@code grayscale}: Output a grayscale JPEG image. The {@code channels} dimension * of {@code image} must be 1. * * {@code rgb}: Output an RGB JPEG image. The {@code channels} dimension * of {@code image} must be 3. * * If {@code format} is not specified or is the empty string, a default format is picked * based on the number of channels in {@code image}: * * * 1: Output a grayscale image. * * 3: Output an RGB image. * * Arguments: * * scope: A Scope object * * image: 3-D with shape {@code [height, width, channels]}. * * Optional attributes (see {@code Attrs}): * * format: Per pixel image format. * * quality: Quality of the compression from 0 to 100 (higher is better and slower). * * progressive: If True, create a JPEG that loads progressively (coarse to fine). * * optimize_size: If True, spend CPU/RAM to reduce size with no quality change. * * chroma_downsampling: See http://en.wikipedia.org/wiki/Chroma_subsampling. * * density_unit: Unit used to specify {@code x_density} and {@code y_density}: * pixels per inch ({@code 'in'}) or centimeter ({@code 'cm'}). * * x_density: Horizontal pixels per density unit. * * y_density: Vertical pixels per density unit. * * xmp_metadata: If not empty, embed this XMP metadata in the image header. * * Returns: * * {@code Output}: 0-D. JPEG-encoded image. */ @Namespace("tensorflow::ops") @NoOffset public static class EncodeJpeg extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EncodeJpeg(Pointer p) { super(p); } /** Optional attribute setters for EncodeJpeg */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Per pixel image format. * * Defaults to "" */ /// public native @ByVal Attrs Format(@StringPiece BytePointer x); public native @ByVal Attrs Format(@StringPiece String x); /** Quality of the compression from 0 to 100 (higher is better and slower).
* * Defaults to 95 */ /// public native @ByVal Attrs Quality(@Cast("tensorflow::int64") long x); /** If True, create a JPEG that loads progressively (coarse to fine). * * Defaults to false */ /// public native @ByVal Attrs Progressive(@Cast("bool") boolean x); /** If True, spend CPU/RAM to reduce size with no quality change. * * Defaults to false */ /// public native @ByVal Attrs OptimizeSize(@Cast("bool") boolean x); /** See http://en.wikipedia.org/wiki/Chroma_subsampling. * * Defaults to true */ /// public native @ByVal Attrs ChromaDownsampling(@Cast("bool") boolean x); /** Unit used to specify {@code x_density} and {@code y_density}: * pixels per inch ({@code 'in'}) or centimeter ({@code 'cm'}). * * Defaults to "in" */ /// public native @ByVal Attrs DensityUnit(@StringPiece BytePointer x); public native @ByVal Attrs DensityUnit(@StringPiece String x); /** Horizontal pixels per density unit. * * Defaults to 300 */ /// public native @ByVal Attrs XDensity(@Cast("tensorflow::int64") long x); /** Vertical pixels per density unit. * * Defaults to 300 */ /// public native @ByVal Attrs YDensity(@Cast("tensorflow::int64") long x); /** If not empty, embed this XMP metadata in the image header. * * Defaults to "" */ public native @ByVal Attrs XmpMetadata(@StringPiece BytePointer x); public native @ByVal Attrs XmpMetadata(@StringPiece String x); public native @StringPiece BytePointer format_(); public native Attrs format_(BytePointer format_); public native @Cast("tensorflow::int64") long quality_(); public native Attrs quality_(long quality_); public native @Cast("bool") boolean progressive_(); public native Attrs progressive_(boolean progressive_); public native @Cast("bool") boolean optimize_size_(); public native Attrs optimize_size_(boolean optimize_size_); public native @Cast("bool") boolean chroma_downsampling_(); public native Attrs chroma_downsampling_(boolean chroma_downsampling_); public native @StringPiece BytePointer density_unit_(); public native Attrs density_unit_(BytePointer density_unit_); public native @Cast("tensorflow::int64") long x_density_(); public native Attrs x_density_(long x_density_); public native @Cast("tensorflow::int64") long y_density_(); public native Attrs y_density_(long y_density_); public native @StringPiece BytePointer xmp_metadata_(); public native Attrs xmp_metadata_(BytePointer xmp_metadata_); } public EncodeJpeg(@Const @ByRef Scope scope, @ByVal Input image) { super((Pointer)null); allocate(scope, image); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image); public EncodeJpeg(@Const @ByRef Scope scope, @ByVal Input image, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, image, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Format(@StringPiece BytePointer x); public static native @ByVal Attrs Format(@StringPiece String x); public static native @ByVal Attrs Quality(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Progressive(@Cast("bool") boolean x); public static native @ByVal Attrs OptimizeSize(@Cast("bool") boolean x); public static native @ByVal Attrs ChromaDownsampling(@Cast("bool") boolean x); public static native @ByVal Attrs DensityUnit(@StringPiece BytePointer x); public static native @ByVal Attrs 
DensityUnit(@StringPiece String x); public static native @ByVal Attrs XDensity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs YDensity(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs XmpMetadata(@StringPiece BytePointer x); public static native @ByVal Attrs XmpMetadata(@StringPiece String x); public native @ByRef Operation operation(); public native EncodeJpeg operation(Operation operation); public native @ByRef Output contents(); public native EncodeJpeg contents(Output contents); } /** PNG-encode an image. * * {@code image} is a 3-D uint8 or uint16 Tensor of shape {@code [height, width, channels]} * where {@code channels} is: * * * 1: for grayscale. * * 2: for grayscale + alpha. * * 3: for RGB. * * 4: for RGBA. * * The ZLIB compression level, {@code compression}, can be -1 for the PNG-encoder * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. * * Arguments: * * scope: A Scope object * * image: 3-D with shape {@code [height, width, channels]}. * * Optional attributes (see {@code Attrs}): * * compression: Compression level. * * Returns: * * {@code Output}: 0-D. PNG-encoded image. */ @Namespace("tensorflow::ops") @NoOffset public static class EncodePng extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EncodePng(Pointer p) { super(p); } /** Optional attribute setters for EncodePng */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Compression level. * * Defaults to -1 */ public native @ByVal Attrs Compression(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long compression_(); public native Attrs compression_(long compression_); } public EncodePng(@Const @ByRef Scope scope, @ByVal Input image) { super((Pointer)null); allocate(scope, image); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image); public EncodePng(@Const @ByRef Scope scope, @ByVal Input image, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, image, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Compression(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native EncodePng operation(Operation operation); public native @ByRef Output contents(); public native EncodePng contents(Output contents); } /** Extracts a glimpse from the input tensor. * * Returns a set of windows called glimpses extracted at location * {@code offsets} from the input tensor. If a window only partially * overlaps the input, the non-overlapping areas will be filled with * random noise. * * The result is a 4-D tensor of shape {@code [batch_size, glimpse_height, * glimpse_width, channels]}. The channels and batch dimensions are the * same as that of the input tensor. The height and width of the output * windows are specified in the {@code size} parameter. * * The arguments {@code normalized} and {@code centered} control how the windows are built: * * * If the coordinates are normalized but not centered, 0.0 and 1.0 * correspond to the minimum and maximum of each height and width * dimension. * * If the coordinates are both normalized and centered, they range from * -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper * left corner, the lower right corner is located at (1.0, 1.0) and the * center is at (0, 0). * * If the coordinates are not normalized they are interpreted as * numbers of pixels. * * Arguments: * * scope: A Scope object * * input: A 4-D float tensor of shape {@code [batch_size, height, width, channels]}. * * size: A 1-D tensor of 2 elements containing the size of the glimpses * to extract. The glimpse height must be specified first, followed * by the glimpse width. * * offsets: A 2-D integer tensor of shape {@code [batch_size, 2]} containing * the y, x locations of the center of each window. * * Optional attributes (see {@code Attrs}): * * centered: indicates if the offset coordinates are centered relative to * the image, in which case the (0, 0) offset is relative to the center * of the input images. If false, the (0,0) offset corresponds to the * upper left corner of the input images. * * normalized: indicates if the offset coordinates are normalized. * * uniform_noise: indicates if the noise should be generated using a * uniform distribution or a Gaussian distribution. * * Returns: * * {@code Output}: A tensor representing the glimpses {@code [batch_size, * glimpse_height, glimpse_width, channels]}.
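 *
 *  A minimal sketch with these bindings (assuming {@code Scope.NewRootScope()}
 *  as in the C++ API; {@code input}, {@code size} and {@code offsets} stand
 *  for inputs built elsewhere):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  // Centered, normalized offsets are the defaults; set them explicitly here.
 *  ExtractGlimpse glimpses = new ExtractGlimpse(scope, input, size, offsets,
 *      new ExtractGlimpse.Attrs().Centered(true).Normalized(true));
 *  Output windows = glimpses.glimpse();  // [batch_size, glimpse_height, glimpse_width, channels]
 *  }</pre>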
*/ @Namespace("tensorflow::ops") @NoOffset public static class ExtractGlimpse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ExtractGlimpse(Pointer p) { super(p); } /** Optional attribute setters for ExtractGlimpse */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** indicates if the offset coordinates are centered relative to * the image, in which case the (0, 0) offset is relative to the center * of the input images. If false, the (0,0) offset corresponds to the * upper left corner of the input images. * * Defaults to true */ /// public native @ByVal Attrs Centered(@Cast("bool") boolean x); /** indicates if the offset coordinates are normalized. * * Defaults to true */ /// public native @ByVal Attrs Normalized(@Cast("bool") boolean x); /** indicates if the noise should be generated using a * uniform distribution or a Gaussian distribution.
* * Defaults to true */ public native @ByVal Attrs UniformNoise(@Cast("bool") boolean x); public native @Cast("bool") boolean centered_(); public native Attrs centered_(boolean centered_); public native @Cast("bool") boolean normalized_(); public native Attrs normalized_(boolean normalized_); public native @Cast("bool") boolean uniform_noise_(); public native Attrs uniform_noise_(boolean uniform_noise_); } public ExtractGlimpse(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input offsets) { super((Pointer)null); allocate(scope, input, size, offsets); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input offsets); public ExtractGlimpse(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input offsets, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, offsets, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input offsets, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Centered(@Cast("bool") boolean x); public static native @ByVal Attrs Normalized(@Cast("bool") boolean x); public static native @ByVal Attrs UniformNoise(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ExtractGlimpse operation(Operation operation); public native @ByRef Output glimpse(); public native ExtractGlimpse glimpse(Output glimpse); } /** Extract the shape information of a JPEG-encoded image. * * This op only parses the image header, so it is much faster than DecodeJpeg. * * Arguments: * * scope: A Scope object * * contents: 0-D. The JPEG-encoded image. * * Optional attributes (see {@code Attrs}): * * output_type: (Optional) The output type of the operation (int32 or int64). * Defaults to int32. * * Returns: * * {@code Output}: 1-D. The image shape with format [height, width, channels]. */ @Namespace("tensorflow::ops") @NoOffset public static class ExtractJpegShape extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ExtractJpegShape(Pointer p) { super(p); } /** Optional attribute setters for ExtractJpegShape */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** (Optional) The output type of the operation (int32 or int64). * Defaults to int32. 
* * Defaults to DT_INT32 */ public native @ByVal Attrs OutputType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int output_type_(); public native Attrs output_type_(int output_type_); } public ExtractJpegShape(@Const @ByRef Scope scope, @ByVal Input contents) { super((Pointer)null); allocate(scope, contents); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents); public ExtractJpegShape(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, contents, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input contents, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs OutputType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native ExtractJpegShape operation(Operation operation); public native @ByRef Output image_shape(); public native ExtractJpegShape image_shape(Output image_shape); } /** Convert one or more images from HSV to RGB. * * Outputs a tensor of the same shape as the {@code images} tensor, containing the RGB * value of the pixels. The output is only well defined if the values in {@code images} * are in {@code [0,1]}. * * See {@code rgb_to_hsv} for a description of the HSV encoding. * * Arguments: * * scope: A Scope object * * images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3. * * Returns: * * {@code Output}: {@code images} converted to RGB. */ @Namespace("tensorflow::ops") @NoOffset public static class HSVToRGB extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HSVToRGB(Pointer p) { super(p); } public HSVToRGB(@Const @ByRef Scope scope, @ByVal Input images) { super((Pointer)null); allocate(scope, images); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native HSVToRGB operation(Operation operation); public native @ByRef Output output(); public native HSVToRGB output(Output output); } /** Greedily selects a subset of bounding boxes in descending order of score, * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes are supplied as * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm * is agnostic to where the origin is in the coordinate system. Note that this * algorithm is invariant to orthogonal transformations and translations * of the coordinate system; thus translations or reflections of the coordinate * system result in the same boxes being selected by the algorithm. * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the {@code tf.gather} operation.
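 *
 *  In these Java bindings, a minimal sketch of the same graph (a sketch only:
 *  {@code Scope.NewRootScope()} is assumed from the C++ scope API, and
 *  {@code boxes}, {@code scores} and {@code max_output_size} stand for inputs
 *  built elsewhere):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  NonMaxSuppression nms = new NonMaxSuppression(scope, boxes, scores,
 *      max_output_size, new NonMaxSuppression.Attrs().IouThreshold(0.5f));
 *  Output keep = nms.selected_indices();  // 1-D indices into boxes
 *  }</pre>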
For example: * selected_indices = tf.image.non_max_suppression( * boxes, scores, max_output_size, iou_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * * Arguments: * * scope: A Scope object * * boxes: A 2-D float tensor of shape {@code [num_boxes, 4]}. * * scores: A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). * * max_output_size: A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. * * Optional attributes (see {@code Attrs}): * * iou_threshold: A float representing the threshold for deciding whether boxes * overlap too much with respect to IOU. * * Returns: * * {@code Output}: A 1-D integer tensor of shape {@code [M]} representing the selected * indices from the boxes tensor, where {@code M <= max_output_size}. */ @Namespace("tensorflow::ops") @NoOffset public static class NonMaxSuppression extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NonMaxSuppression(Pointer p) { super(p); } /** Optional attribute setters for NonMaxSuppression */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A float representing the threshold for deciding whether boxes * overlap too much with respect to IOU. * * Defaults to 0.5 */ public native @ByVal Attrs IouThreshold(float x); public native float iou_threshold_(); public native Attrs iou_threshold_(float iou_threshold_); } public NonMaxSuppression(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size) { super((Pointer)null); allocate(scope, boxes, scores, max_output_size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size); public NonMaxSuppression(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, boxes, scores, max_output_size, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs IouThreshold(float x); public native @ByRef Operation operation(); public native NonMaxSuppression operation(Operation operation); public native @ByRef Output selected_indices(); public native NonMaxSuppression selected_indices(Output selected_indices); } /** Greedily selects a subset of bounding boxes in descending order of score, * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. 
Bounding boxes are supplied as * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm * is agnostic to where the origin is in the coordinate system. Note that this * algorithm is invariant to orthogonal transformations and translations * of the coordinate system; thus translations or reflections of the coordinate * system result in the same boxes being selected by the algorithm. * * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the {@code tf.gather} operation. For example: * * selected_indices = tf.image.non_max_suppression_v2( * boxes, scores, max_output_size, iou_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * * Arguments: * * scope: A Scope object * * boxes: A 2-D float tensor of shape {@code [num_boxes, 4]}. * * scores: A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). * * max_output_size: A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. * * iou_threshold: A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much with respect to IOU. * * Returns: * * {@code Output}: A 1-D integer tensor of shape {@code [M]} representing the selected * indices from the boxes tensor, where {@code M <= max_output_size}. */ @Namespace("tensorflow::ops") @NoOffset public static class NonMaxSuppressionV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NonMaxSuppressionV2(Pointer p) { super(p); } public NonMaxSuppressionV2(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold) { super((Pointer)null); allocate(scope, boxes, scores, max_output_size, iou_threshold); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native NonMaxSuppressionV2 operation(Operation operation); public native @ByRef Output selected_indices(); public native NonMaxSuppressionV2 selected_indices(Output selected_indices); } /** Greedily selects a subset of bounding boxes in descending order of score, * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes with score less than * {@code score_threshold} are removed. Bounding boxes are supplied as * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized * (i.e., lying in the interval [0, 1]) or absolute.
Note that this algorithm * is agnostic to where the origin is in the coordinate system and more * generally is invariant to orthogonal transformations and translations * of the coordinate system; thus translations or reflections of the coordinate * system result in the same boxes being selected by the algorithm. * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the {@code tf.gather} operation. For example: * selected_indices = tf.image.non_max_suppression( * boxes, scores, max_output_size, iou_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * * Arguments: * * scope: A Scope object * * boxes: A 2-D float tensor of shape {@code [num_boxes, 4]}. * * scores: A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). * * max_output_size: A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. * * iou_threshold: A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much with respect to IOU. * * score_threshold: A 0-D float tensor representing the threshold for deciding when to remove * boxes based on score. * * Returns: * * {@code Output}: A 1-D integer tensor of shape {@code [M]} representing the selected * indices from the boxes tensor, where {@code M <= max_output_size}. */ @Namespace("tensorflow::ops") @NoOffset public static class NonMaxSuppressionV3 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NonMaxSuppressionV3(Pointer p) { super(p); } public NonMaxSuppressionV3(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold, @ByVal Input score_threshold) { super((Pointer)null); allocate(scope, boxes, scores, max_output_size, iou_threshold, score_threshold); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold, @ByVal Input score_threshold); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native NonMaxSuppressionV3 operation(Operation operation); public native @ByRef Output selected_indices(); public native NonMaxSuppressionV3 selected_indices(Output selected_indices); } /** Greedily selects a subset of bounding boxes in descending order of score, * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes with score less than * {@code score_threshold} are removed. Bounding boxes are supplied as * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm * is agnostic to where the origin is in the coordinate system and more * generally is invariant to orthogonal transformations and translations * of the coordinate system; thus translations or reflections of the coordinate * system result in the same boxes being selected by the algorithm.
* The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the {@code tf.gather} operation. For example: * selected_indices = tf.image.non_max_suppression( * boxes, scores, max_output_size, iou_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * * Arguments: * * scope: A Scope object * * boxes: A 2-D float tensor of shape {@code [num_boxes, 4]}. * * scores: A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). * * max_output_size: A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. * * iou_threshold: A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much with respect to IOU. * * score_threshold: A 0-D float tensor representing the threshold for deciding when to remove * boxes based on score. * * Optional attributes (see {@code Attrs}): * * pad_to_max_output_size: If true, the output {@code selected_indices} is padded to be of length * {@code max_output_size}. Defaults to false. * * Returns: * * {@code Output} selected_indices: A 1-D integer tensor of shape {@code [M]} representing the selected * indices from the boxes tensor, where {@code M <= max_output_size}. * * {@code Output} valid_outputs: A 0-D integer tensor representing the number of valid elements in * {@code selected_indices}, with the valid elements appearing first. */ @Namespace("tensorflow::ops") @NoOffset public static class NonMaxSuppressionV4 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NonMaxSuppressionV4(Pointer p) { super(p); } /** Optional attribute setters for NonMaxSuppressionV4 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the output {@code selected_indices} is padded to be of length * {@code max_output_size}. Defaults to false.
* * Defaults to false */ public native @ByVal Attrs PadToMaxOutputSize(@Cast("bool") boolean x); public native @Cast("bool") boolean pad_to_max_output_size_(); public native Attrs pad_to_max_output_size_(boolean pad_to_max_output_size_); } public NonMaxSuppressionV4(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold, @ByVal Input score_threshold) { super((Pointer)null); allocate(scope, boxes, scores, max_output_size, iou_threshold, score_threshold); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold, @ByVal Input score_threshold); public NonMaxSuppressionV4(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold, @ByVal Input score_threshold, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, boxes, scores, max_output_size, iou_threshold, score_threshold, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input boxes, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input iou_threshold, @ByVal Input score_threshold, @Const @ByRef Attrs attrs); public static native @ByVal Attrs PadToMaxOutputSize(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native NonMaxSuppressionV4 operation(Operation operation); public native @ByRef Output selected_indices(); public native NonMaxSuppressionV4 selected_indices(Output selected_indices); public native @ByRef Output valid_outputs(); public native NonMaxSuppressionV4 valid_outputs(Output valid_outputs); } /** Greedily selects a subset of bounding boxes in descending order of score, * * pruning away boxes that have high overlaps * with previously selected boxes. Bounding boxes with score less than * {@code score_threshold} are removed. N-by-n overlap values are supplied as a square matrix, * which allows for defining a custom overlap criterion (e.g., intersection over union, * intersection over area, etc.). * * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the {@code tf.gather} operation. For example: * * selected_indices = tf.image.non_max_suppression_with_overlaps( * overlaps, scores, max_output_size, overlap_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * * Arguments: * * scope: A Scope object * * overlaps: A 2-D float tensor of shape {@code [num_boxes, num_boxes]} representing * the n-by-n box overlap values. * * scores: A 1-D float tensor of shape {@code [num_boxes]} representing a single * score corresponding to each box (each row of boxes). * * max_output_size: A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. * * overlap_threshold: A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much. * * score_threshold: A 0-D float tensor representing the threshold for deciding when to remove * boxes based on score. * * Returns: * * {@code Output}: A 1-D integer tensor of shape {@code [M]} representing the selected * indices from the boxes tensor, where {@code M <= max_output_size}.
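 *
 *  A minimal sketch with these bindings (assuming {@code Scope.NewRootScope()}
 *  as in the C++ API; the overlaps, scores and threshold inputs stand for
 *  tensors built elsewhere):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  NonMaxSuppressionWithOverlaps nms = new NonMaxSuppressionWithOverlaps(
 *      scope, overlaps, scores, max_output_size, overlap_threshold,
 *      score_threshold);
 *  Output keep = nms.selected_indices();  // 1-D, at most max_output_size entries
 *  }</pre>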
*/ @Namespace("tensorflow::ops") @NoOffset public static class NonMaxSuppressionWithOverlaps extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NonMaxSuppressionWithOverlaps(Pointer p) { super(p); } public NonMaxSuppressionWithOverlaps(@Const @ByRef Scope scope, @ByVal Input overlaps, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input overlap_threshold, @ByVal Input score_threshold) { super((Pointer)null); allocate(scope, overlaps, scores, max_output_size, overlap_threshold, score_threshold); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input overlaps, @ByVal Input scores, @ByVal Input max_output_size, @ByVal Input overlap_threshold, @ByVal Input score_threshold); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native NonMaxSuppressionWithOverlaps operation(Operation operation); public native @ByRef Output selected_indices(); public native NonMaxSuppressionWithOverlaps selected_indices(Output selected_indices); } /** Resize quantized {@code images} to {@code size} using quantized bilinear interpolation. * * Input images and output images must be quantized types. * * Arguments: * * scope: A Scope object * * images: 4-D with shape {@code [batch, height, width, channels]}. * * size: = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. * * Optional attributes (see {@code Attrs}): * * align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Returns: * * {@code Output} resized_images: 4-D with shape * {@code [batch, new_height, new_width, channels]}. * * {@code Output} out_min * * {@code Output} out_max */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedResizeBilinear extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedResizeBilinear(Pointer p) { super(p); } /** Optional attribute setters for QuantizedResizeBilinear */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. 
* * Defaults to false */ public native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @Cast("bool") boolean align_corners_(); public native Attrs align_corners_(boolean align_corners_); } public QuantizedResizeBilinear(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @ByVal Input min, @ByVal Input max) { super((Pointer)null); allocate(scope, images, size, min, max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @ByVal Input min, @ByVal Input max); public QuantizedResizeBilinear(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, images, size, min, max, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @ByVal Input min, @ByVal Input max, @Const @ByRef Attrs attrs); public static native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native QuantizedResizeBilinear operation(Operation operation); public native @ByRef Output resized_images(); public native QuantizedResizeBilinear resized_images(Output resized_images); public native @ByRef Output out_min(); public native QuantizedResizeBilinear out_min(Output out_min); public native @ByRef Output out_max(); public native QuantizedResizeBilinear out_max(Output out_max); } /** Converts one or more images from RGB to HSV. * * Outputs a tensor of the same shape as the {@code images} tensor, containing the HSV * value of the pixels. The output is only well defined if the values in {@code images} * are in {@code [0,1]}. * * {@code output[..., 0]} contains hue, {@code output[..., 1]} contains saturation, and * {@code output[..., 2]} contains value. All HSV values are in {@code [0,1]}. A hue of 0 * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. * * Arguments: * * scope: A Scope object * * images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3. * * Returns: * * {@code Output}: {@code images} converted to HSV. */ @Namespace("tensorflow::ops") @NoOffset public static class RGBToHSV extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RGBToHSV(Pointer p) { super(p); } public RGBToHSV(@Const @ByRef Scope scope, @ByVal Input images) { super((Pointer)null); allocate(scope, images); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native RGBToHSV operation(Operation operation); public native @ByRef Output output(); public native RGBToHSV output(Output output); } /** Resize {@code images} to {@code size} using area interpolation. * * Input images can be of different types but output images are always float. * * The range of pixel values for the output image might be slightly different * from the range for the input image because of limited numerical precision. * To guarantee an output range, for example {@code [0.0, 1.0]}, apply * {@code tf.clip_by_value} to the output. * * Each output pixel is computed by first transforming the pixel's footprint into * the input tensor and then averaging the pixels that intersect the footprint. An * input pixel's contribution to the average is weighted by the fraction of its * area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
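 *
 *  A minimal sketch with these bindings; the same pattern applies to
 *  ResizeBicubic, ResizeBilinear and ResizeNearestNeighbor (assuming
 *  {@code Scope.NewRootScope()} as in the C++ API; {@code images} and the
 *  1-D int32 {@code size} input stand for tensors built elsewhere):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  ResizeArea resize = new ResizeArea(scope, images, size,
 *      new ResizeArea.Attrs().AlignCorners(false));
 *  Output resized = resize.resized_images();  // [batch, new_height, new_width, channels], float
 *  }</pre>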
* * Arguments: * * scope: A Scope object * * images: 4-D with shape {@code [batch, height, width, channels]}. * * size: = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. * * Optional attributes (see {@code Attrs}): * * align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Returns: * * {@code Output}: 4-D with shape * {@code [batch, new_height, new_width, channels]}. */ @Namespace("tensorflow::ops") @NoOffset public static class ResizeArea extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResizeArea(Pointer p) { super(p); } /** Optional attribute setters for ResizeArea */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Defaults to false */ public native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @Cast("bool") boolean align_corners_(); public native Attrs align_corners_(boolean align_corners_); } public ResizeArea(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size) { super((Pointer)null); allocate(scope, images, size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size); public ResizeArea(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, images, size, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResizeArea operation(Operation operation); public native @ByRef Output resized_images(); public native ResizeArea resized_images(Output resized_images); } /** Resize {@code images} to {@code size} using bicubic interpolation. * * Input images can be of different types but output images are always float. * * Arguments: * * scope: A Scope object * * images: 4-D with shape {@code [batch, height, width, channels]}. * * size: = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. * * Optional attributes (see {@code Attrs}): * * align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false.
* * Returns: * * {@code Output}: 4-D with shape * {@code [batch, new_height, new_width, channels]}. */ @Namespace("tensorflow::ops") @NoOffset public static class ResizeBicubic extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResizeBicubic(Pointer p) { super(p); } /** Optional attribute setters for ResizeBicubic */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Defaults to false */ public native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @Cast("bool") boolean align_corners_(); public native Attrs align_corners_(boolean align_corners_); } public ResizeBicubic(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size) { super((Pointer)null); allocate(scope, images, size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size); public ResizeBicubic(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, images, size, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResizeBicubic operation(Operation operation); public native @ByRef Output resized_images(); public native ResizeBicubic resized_images(Output resized_images); } /** Resize {@code images} to {@code size} using bilinear interpolation. * * Input images can be of different types but output images are always float. * * Arguments: * * scope: A Scope object * * images: 4-D with shape {@code [batch, height, width, channels]}. * * size: = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. * * Optional attributes (see {@code Attrs}): * * align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Returns: * * {@code Output}: 4-D with shape * {@code [batch, new_height, new_width, channels]}. */ @Namespace("tensorflow::ops") @NoOffset public static class ResizeBilinear extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResizeBilinear(Pointer p) { super(p); } /** Optional attribute setters for ResizeBilinear */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Defaults to false */ public native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @Cast("bool") boolean align_corners_(); public native Attrs align_corners_(boolean align_corners_); } public ResizeBilinear(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size) { super((Pointer)null); allocate(scope, images, size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size); public ResizeBilinear(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, images, size, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResizeBilinear operation(Operation operation); public native @ByRef Output resized_images(); public native ResizeBilinear resized_images(Output resized_images); } /** Resize {@code images} to {@code size} using nearest neighbor interpolation. * * Arguments: * * scope: A Scope object * * images: 4-D with shape {@code [batch, height, width, channels]}. * * size: = A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The * new size for the images. * * Optional attributes (see {@code Attrs}): * * align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. * * Returns: * * {@code Output}: 4-D with shape * {@code [batch, new_height, new_width, channels]}. */ @Namespace("tensorflow::ops") @NoOffset public static class ResizeNearestNeighbor extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResizeNearestNeighbor(Pointer p) { super(p); } /** Optional attribute setters for ResizeNearestNeighbor */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the centers of the 4 corner pixels of the input and output tensors are * aligned, preserving the values at the corner pixels. Defaults to false. 
* * Defaults to false */ public native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @Cast("bool") boolean align_corners_(); public native Attrs align_corners_(boolean align_corners_); } public ResizeNearestNeighbor(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size) { super((Pointer)null); allocate(scope, images, size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size); public ResizeNearestNeighbor(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, images, size, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input images, @ByVal Input size, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs AlignCorners(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResizeNearestNeighbor operation(Operation operation); public native @ByRef Output resized_images(); public native ResizeNearestNeighbor resized_images(Output resized_images); } /** Generate a single randomly distorted bounding box for an image. * * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving * its content, i.e. *data augmentation*. This Op outputs a randomly distorted * localization of an object, i.e. bounding box, given an {@code image_size}, * {@code bounding_boxes} and a series of constraints. * * The output of this Op is a single bounding box that may be used to crop the * original image. The output is returned as 3 tensors: {@code begin}, {@code size} and * {@code bboxes}. The first 2 tensors can be fed directly into {@code tf.slice} to crop the * image. The latter may be supplied to {@code tf.image.draw_bounding_boxes} to visualize * what the bounding box looks like. * * Bounding boxes are supplied and returned as {@code [y_min, x_min, y_max, x_max]}. The * bounding box coordinates are floats in {@code [0.0, 1.0]} relative to the width and * height of the underlying image. * * For example, * *

 *  <pre>{@code python
 *      # Generate a single distorted bounding box.
 *      begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
 *          tf.shape(image),
 *          bounding_boxes=bounding_boxes)
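 *      # begin/size give the crop offset and extent for tf.slice, while
 *      # bbox_for_draw holds the same box in normalized [0, 1] coordinates.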
 * 
 *      # Draw the bounding box in an image summary.
 *      image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
 *                                                    bbox_for_draw)
 *      tf.summary.image('images_with_box', image_with_box)
 * 
 *      # Employ the bounding box to distort the image.
 *      distorted_image = tf.slice(image, begin, size)
 *  }</pre>
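 * 
 *  The same pipeline through these Java bindings, as a minimal sketch
 *  (illustrative only; {@code scope}, {@code imageSize}, and {@code boundingBoxes}
 *  are assumed to be built elsewhere):
 * 
 *  <pre>{@code java
 *      // imageSize, boundingBoxes: assumed pre-built Input handles.
 *      // Seed the generator and ask the crop to cover at least half of any box.
 *      SampleDistortedBoundingBox sample = new SampleDistortedBoundingBox(scope,
 *          imageSize, boundingBoxes,
 *          SampleDistortedBoundingBox.Seed(42).MinObjectCovered(0.5f));
 *      Output begin = sample.begin();   // feed to tf.slice
 *      Output size = sample.size();     // feed to tf.slice
 *      Output bboxes = sample.bboxes(); // feed to draw_bounding_boxes
 *  }</pre>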
* * Note that if no bounding box information is available, setting * {@code use_image_if_no_bounding_boxes = true} will assume there is a single implicit * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. * * Arguments: * * scope: A Scope object * * image_size: 1-D, containing {@code [height, width, channels]}. * * bounding_boxes: 3-D with shape {@code [batch, N, 4]} describing the N bounding boxes * associated with the image. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to non-zero, the random number * generator is seeded by the given {@code seed}. Otherwise, it is seeded by a random * seed. * * seed2: A second seed to avoid seed collision. * * min_object_covered: The cropped area of the image must contain at least this * fraction of any bounding box supplied. The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. * * aspect_ratio_range: The cropped area of the image must have an aspect ratio = * width / height within this range. * * area_range: The cropped area of the image must contain a fraction of the * supplied image within this range. * * max_attempts: Number of attempts at generating a cropped region of the image * of the specified constraints. After {@code max_attempts} failures, return the entire * image. * * use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes supplied. * If true, assume an implicit bounding box covering the whole input. If false, * raise an error. * * Returns: * * {@code Output} begin: 1-D, containing {@code [offset_height, offset_width, 0]}. Provide as input to * {@code tf.slice}. * * {@code Output} size: 1-D, containing {@code [target_height, target_width, -1]}. Provide as input to * {@code tf.slice}. * * {@code Output} bboxes: 3-D with shape {@code [1, 1, 4]} containing the distorted bounding box. * Provide as input to {@code tf.image.draw_bounding_boxes}. */ @Namespace("tensorflow::ops") @NoOffset public static class SampleDistortedBoundingBox extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SampleDistortedBoundingBox(Pointer p) { super(p); } /** Optional attribute setters for SampleDistortedBoundingBox */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to non-zero, the random number * generator is seeded by the given {@code seed}. Otherwise, it is seeded by a random * seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ /// public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); /** The cropped area of the image must contain at least this * fraction of any bounding box supplied. 
The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. * * Defaults to 0.1 */ /// public native @ByVal Attrs MinObjectCovered(float x); /** The cropped area of the image must have an aspect ratio = * width / height within this range. * * Defaults to [0.75, 1.33] */ /// public native @ByVal Attrs AspectRatioRange(@ArraySlice FloatPointer x); public native @ByVal Attrs AspectRatioRange(@ArraySlice FloatBuffer x); public native @ByVal Attrs AspectRatioRange(@ArraySlice float... x); /** The cropped area of the image must contain a fraction of the * supplied image within this range. * * Defaults to [0.05, 1] */ /// public native @ByVal Attrs AreaRange(@ArraySlice FloatPointer x); public native @ByVal Attrs AreaRange(@ArraySlice FloatBuffer x); public native @ByVal Attrs AreaRange(@ArraySlice float... x); /** Number of attempts at generating a cropped region of the image * of the specified constraints. After {@code max_attempts} failures, return the entire * image. * * Defaults to 100 */ /// public native @ByVal Attrs MaxAttempts(@Cast("tensorflow::int64") long x); /** Controls behavior if no bounding boxes supplied. * If true, assume an implicit bounding box covering the whole input. If false, * raise an error. * * Defaults to false */ public native @ByVal Attrs UseImageIfNoBoundingBoxes(@Cast("bool") boolean x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); public native float min_object_covered_(); public native Attrs min_object_covered_(float min_object_covered_); public native @ArraySlice FloatPointer aspect_ratio_range_(); public native Attrs aspect_ratio_range_(FloatPointer aspect_ratio_range_); public native @ArraySlice FloatPointer area_range_(); public native Attrs area_range_(FloatPointer area_range_); public native @Cast("tensorflow::int64") long max_attempts_(); public native Attrs max_attempts_(long max_attempts_); public native @Cast("bool") boolean use_image_if_no_bounding_boxes_(); public native Attrs use_image_if_no_bounding_boxes_(boolean use_image_if_no_bounding_boxes_); } public SampleDistortedBoundingBox(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes) { super((Pointer)null); allocate(scope, image_size, bounding_boxes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes); public SampleDistortedBoundingBox(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, image_size, bounding_boxes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs MinObjectCovered(float x); public static native @ByVal Attrs AspectRatioRange(@ArraySlice FloatPointer x); public static native @ByVal Attrs AspectRatioRange(@ArraySlice FloatBuffer x); public static native @ByVal Attrs AspectRatioRange(@ArraySlice float... 
x); public static native @ByVal Attrs AreaRange(@ArraySlice FloatPointer x); public static native @ByVal Attrs AreaRange(@ArraySlice FloatBuffer x); public static native @ByVal Attrs AreaRange(@ArraySlice float... x); public static native @ByVal Attrs MaxAttempts(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs UseImageIfNoBoundingBoxes(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SampleDistortedBoundingBox operation(Operation operation); public native @ByRef Output begin(); public native SampleDistortedBoundingBox begin(Output begin); public native @ByRef Output size(); public native SampleDistortedBoundingBox size(Output size); public native @ByRef Output bboxes(); public native SampleDistortedBoundingBox bboxes(Output bboxes); } /** Generate a single randomly distorted bounding box for an image. * * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving * its content, i.e. *data augmentation*. This Op outputs a randomly distorted * localization of an object, i.e. bounding box, given an {@code image_size}, * {@code bounding_boxes} and a series of constraints. * * The output of this Op is a single bounding box that may be used to crop the * original image. The output is returned as 3 tensors: {@code begin}, {@code size} and * {@code bboxes}. The first 2 tensors can be fed directly into {@code tf.slice} to crop the * image. The latter may be supplied to {@code tf.image.draw_bounding_boxes} to visualize * what the bounding box looks like. * * Bounding boxes are supplied and returned as {@code [y_min, x_min, y_max, x_max]}. The * bounding box coordinates are floats in {@code [0.0, 1.0]} relative to the width and * height of the underlying image. * * For example, * *
 *  <pre>{@code python
 *      # Generate a single distorted bounding box.
 *      begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
 *          tf.shape(image),
 *          bounding_boxes=bounding_boxes)
 * 
 *      # Draw the bounding box in an image summary.
 *      image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
 *                                                    bbox_for_draw)
 *      tf.summary.image('images_with_box', image_with_box)
 * 
 *      # Employ the bounding box to distort the image.
 *      distorted_image = tf.slice(image, begin, size)
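 *      # Unlike the V1 op, V2 takes min_object_covered as a Tensor argument
 *      # rather than as an attribute.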
 *  }</pre>
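 * 
 *  Through these Java bindings, the visible difference from the V1 op is that
 *  {@code min_object_covered} is a regular {@code Input} rather than an
 *  {@code Attrs} setter (a minimal, illustrative sketch; the inputs are assumed
 *  to be built elsewhere):
 * 
 *  <pre>{@code java
 *      // imageSize, boundingBoxes, minObjectCovered: assumed Input handles.
 *      SampleDistortedBoundingBoxV2 sample = new SampleDistortedBoundingBoxV2(
 *          scope, imageSize, boundingBoxes, minObjectCovered);
 *      Output begin = sample.begin();
 *  }</pre>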
* * Note that if no bounding box information is available, setting * {@code use_image_if_no_bounding_boxes = true} will assume there is a single implicit * bounding box covering the whole image. If {@code use_image_if_no_bounding_boxes} is * false and no bounding boxes are supplied, an error is raised. * * Arguments: * * scope: A Scope object * * image_size: 1-D, containing {@code [height, width, channels]}. * * bounding_boxes: 3-D with shape {@code [batch, N, 4]} describing the N bounding boxes * associated with the image. * * min_object_covered: The cropped area of the image must contain at least this * fraction of any bounding box supplied. The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to non-zero, the random number * generator is seeded by the given {@code seed}. Otherwise, it is seeded by a random * seed. * * seed2: A second seed to avoid seed collision. * * aspect_ratio_range: The cropped area of the image must have an aspect ratio = * width / height within this range. * * area_range: The cropped area of the image must contain a fraction of the * supplied image within this range. * * max_attempts: Number of attempts at generating a cropped region of the image * of the specified constraints. After {@code max_attempts} failures, return the entire * image. * * use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes supplied. * If true, assume an implicit bounding box covering the whole input. If false, * raise an error. * * Returns: * * {@code Output} begin: 1-D, containing {@code [offset_height, offset_width, 0]}. Provide as input to * {@code tf.slice}. * * {@code Output} size: 1-D, containing {@code [target_height, target_width, -1]}. Provide as input to * {@code tf.slice}. * * {@code Output} bboxes: 3-D with shape {@code [1, 1, 4]} containing the distorted bounding box. * Provide as input to {@code tf.image.draw_bounding_boxes}. */ @Namespace("tensorflow::ops") @NoOffset public static class SampleDistortedBoundingBoxV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SampleDistortedBoundingBoxV2(Pointer p) { super(p); } /** Optional attribute setters for SampleDistortedBoundingBoxV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to non-zero, the random number * generator is seeded by the given {@code seed}. Otherwise, it is seeded by a random * seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ /// public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); /** The cropped area of the image must have an aspect ratio = * width / height within this range. 
* * Defaults to [0.75, 1.33] */ /// public native @ByVal Attrs AspectRatioRange(@ArraySlice FloatPointer x); public native @ByVal Attrs AspectRatioRange(@ArraySlice FloatBuffer x); public native @ByVal Attrs AspectRatioRange(@ArraySlice float... x); /** The cropped area of the image must contain a fraction of the * supplied image within this range. * * Defaults to [0.05, 1] */ /// public native @ByVal Attrs AreaRange(@ArraySlice FloatPointer x); public native @ByVal Attrs AreaRange(@ArraySlice FloatBuffer x); public native @ByVal Attrs AreaRange(@ArraySlice float... x); /** Number of attempts at generating a cropped region of the image * of the specified constraints. After {@code max_attempts} failures, return the entire * image. * * Defaults to 100 */ /// public native @ByVal Attrs MaxAttempts(@Cast("tensorflow::int64") long x); /** Controls behavior if no bounding boxes supplied. * If true, assume an implicit bounding box covering the whole input. If false, * raise an error. * * Defaults to false */ public native @ByVal Attrs UseImageIfNoBoundingBoxes(@Cast("bool") boolean x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); public native @ArraySlice FloatPointer aspect_ratio_range_(); public native Attrs aspect_ratio_range_(FloatPointer aspect_ratio_range_); public native @ArraySlice FloatPointer area_range_(); public native Attrs area_range_(FloatPointer area_range_); public native @Cast("tensorflow::int64") long max_attempts_(); public native Attrs max_attempts_(long max_attempts_); public native @Cast("bool") boolean use_image_if_no_bounding_boxes_(); public native Attrs use_image_if_no_bounding_boxes_(boolean use_image_if_no_bounding_boxes_); } public SampleDistortedBoundingBoxV2(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes, @ByVal Input min_object_covered) { super((Pointer)null); allocate(scope, image_size, bounding_boxes, min_object_covered); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes, @ByVal Input min_object_covered); public SampleDistortedBoundingBoxV2(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes, @ByVal Input min_object_covered, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, image_size, bounding_boxes, min_object_covered, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input image_size, @ByVal Input bounding_boxes, @ByVal Input min_object_covered, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs AspectRatioRange(@ArraySlice FloatPointer x); public static native @ByVal Attrs AspectRatioRange(@ArraySlice FloatBuffer x); public static native @ByVal Attrs AspectRatioRange(@ArraySlice float... x); public static native @ByVal Attrs AreaRange(@ArraySlice FloatPointer x); public static native @ByVal Attrs AreaRange(@ArraySlice FloatBuffer x); public static native @ByVal Attrs AreaRange(@ArraySlice float... 
        x);
    public static native @ByVal Attrs MaxAttempts(@Cast("tensorflow::int64") long x);
    public static native @ByVal Attrs UseImageIfNoBoundingBoxes(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native SampleDistortedBoundingBoxV2 operation(Operation operation);
    public native @ByRef Output begin(); public native SampleDistortedBoundingBoxV2 begin(Output begin);
    public native @ByRef Output size(); public native SampleDistortedBoundingBoxV2 size(Output size);
    public native @ByRef Output bboxes(); public native SampleDistortedBoundingBoxV2 bboxes(Output bboxes);
}

/** \} */

// namespace ops
// namespace tensorflow

// #endif  // TENSORFLOW_CC_OPS_IMAGE_OPS_H_

// Parsed from tensorflow/cc/ops/io_ops.h

// This file is MACHINE GENERATED! Do not edit.

// #ifndef TENSORFLOW_CC_OPS_IO_OPS_H_
// #define TENSORFLOW_CC_OPS_IO_OPS_H_

// This file is MACHINE GENERATED! Do not edit.

// #include "tensorflow/cc/framework/ops.h"
// #include "tensorflow/cc/framework/scope.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/framework/tensor_shape.h"
// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"

/** \defgroup io_ops Io Ops
 *  \{
 */

/**
* A Reader that outputs fixed-length records from a file. * * Arguments: * * scope: A Scope object * * record_bytes: Number of bytes in the record. * * Optional attributes (see {@code Attrs}): * * header_bytes: Number of bytes in the header, defaults to 0. * * footer_bytes: Number of bytes in the footer, defaults to 0. * * hop_bytes: Number of bytes to hop before each read. Default of 0 means using * record_bytes. * * container: If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * encoding: The type of encoding for the file. Currently ZLIB and GZIP * are supported. Defaults to none. * * Returns: * * {@code Output}: The handle to reference the Reader. */ @Namespace("tensorflow::ops") @NoOffset public static class FixedLengthRecordReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FixedLengthRecordReader(Pointer p) { super(p); } /** Optional attribute setters for FixedLengthRecordReader */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Number of bytes in the header, defaults to 0. * * Defaults to 0 */ /// public native @ByVal Attrs HeaderBytes(@Cast("tensorflow::int64") long x); /** Number of bytes in the footer, defaults to 0. * * Defaults to 0 */ /// public native @ByVal Attrs FooterBytes(@Cast("tensorflow::int64") long x); /** Number of bytes to hop before each read. Default of 0 means using * record_bytes. * * Defaults to 0 */ /// public native @ByVal Attrs HopBytes(@Cast("tensorflow::int64") long x); /** If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Defaults to "" */ /// public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); /** The type of encoding for the file. Currently ZLIB and GZIP * are supported. Defaults to none. 
* * Defaults to "" */ public native @ByVal Attrs Encoding(@StringPiece BytePointer x); public native @ByVal Attrs Encoding(@StringPiece String x); public native @Cast("tensorflow::int64") long header_bytes_(); public native Attrs header_bytes_(long header_bytes_); public native @Cast("tensorflow::int64") long footer_bytes_(); public native Attrs footer_bytes_(long footer_bytes_); public native @Cast("tensorflow::int64") long hop_bytes_(); public native Attrs hop_bytes_(long hop_bytes_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); public native @StringPiece BytePointer encoding_(); public native Attrs encoding_(BytePointer encoding_); } public FixedLengthRecordReader(@Const @ByRef Scope scope, @Cast("tensorflow::int64") long record_bytes) { super((Pointer)null); allocate(scope, record_bytes); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::int64") long record_bytes); public FixedLengthRecordReader(@Const @ByRef Scope scope, @Cast("tensorflow::int64") long record_bytes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, record_bytes, attrs); } private native void allocate(@Const @ByRef Scope scope, @Cast("tensorflow::int64") long record_bytes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs HeaderBytes(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs FooterBytes(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs HopBytes(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public static native @ByVal Attrs Encoding(@StringPiece BytePointer x); public static native @ByVal Attrs Encoding(@StringPiece String x); public native @ByRef Operation operation(); public native FixedLengthRecordReader operation(Operation operation); public native @ByRef Output reader_handle(); public native FixedLengthRecordReader reader_handle(Output reader_handle); } /** A Reader that outputs the queued work as both the key and value. * * To use, enqueue strings in a Queue. ReaderRead will take the front * work string and output (work, work). * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * container: If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Returns: * * {@code Output}: The handle to reference the Reader. */ @Namespace("tensorflow::ops") @NoOffset public static class IdentityReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IdentityReader(Pointer p) { super(p); } /** Optional attribute setters for IdentityReader */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. 
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public IdentityReader(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public IdentityReader(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, attrs); } private native void allocate(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native IdentityReader operation(Operation operation); public native @ByRef Output reader_handle(); public native IdentityReader reader_handle(Output reader_handle); } /** A Reader that outputs the records from a LMDB file. * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * container: If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Returns: * * {@code Output}: The handle to reference the Reader. */ @Namespace("tensorflow::ops") @NoOffset public static class LMDBReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LMDBReader(Pointer p) { super(p); } /** Optional attribute setters for LMDBReader */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public LMDBReader(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public LMDBReader(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, attrs); } private native void allocate(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native LMDBReader operation(Operation operation); public native @ByRef Output reader_handle(); public native LMDBReader reader_handle(Output reader_handle); } /** Returns the set of files matching one or more glob patterns. * * Note that this routine only supports wildcard characters in the * basename portion of the pattern, not in the directory portion. * Note also that the order of filenames returned can be non-deterministic. * * Arguments: * * scope: A Scope object * * pattern: Shell wildcard pattern(s). Scalar or vector of type string. * * Returns: * * {@code Output}: A vector of matching filenames. */ @Namespace("tensorflow::ops") @NoOffset public static class MatchingFiles extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatchingFiles(Pointer p) { super(p); } public MatchingFiles(@Const @ByRef Scope scope, @ByVal Input pattern) { super((Pointer)null); allocate(scope, pattern); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input pattern); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native MatchingFiles operation(Operation operation); public native @ByRef Output filenames(); public native MatchingFiles filenames(Output filenames); } /** V2 format specific: merges the metadata files of sharded checkpoints. The * * result is one logical checkpoint, with one physical metadata file and renamed * data files. * * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. 
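 * 
 *  A minimal usage sketch through these Java bindings (illustrative; the two
 *  string inputs are assumed to be built elsewhere):
 * 
 *  <pre>{@code java
 *      // checkpoint_prefixes, destination_prefix: assumed string Input handles.
 *      // Merge shard prefixes into one logical checkpoint, keeping old dirs.
 *      MergeV2Checkpoints merge = new MergeV2Checkpoints(scope,
 *          checkpoint_prefixes, destination_prefix,
 *          MergeV2Checkpoints.DeleteOldDirs(false));
 *  }</pre>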
* * If delete_old_dirs is true, attempts to delete recursively the dirname of each * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. * * Arguments: * * scope: A Scope object * * checkpoint_prefixes: prefixes of V2 checkpoints to merge. * * destination_prefix: scalar. The desired final prefix. Allowed to be the same * as one of the checkpoint_prefixes. * * Optional attributes (see {@code Attrs}): * * delete_old_dirs: see above. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class MergeV2Checkpoints extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MergeV2Checkpoints(Pointer p) { super(p); } /** Optional attribute setters for MergeV2Checkpoints */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** see above. * * Defaults to true */ public native @ByVal Attrs DeleteOldDirs(@Cast("bool") boolean x); public native @Cast("bool") boolean delete_old_dirs_(); public native Attrs delete_old_dirs_(boolean delete_old_dirs_); } public MergeV2Checkpoints(@Const @ByRef Scope scope, @ByVal Input checkpoint_prefixes, @ByVal Input destination_prefix) { super((Pointer)null); allocate(scope, checkpoint_prefixes, destination_prefix); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input checkpoint_prefixes, @ByVal Input destination_prefix); public MergeV2Checkpoints(@Const @ByRef Scope scope, @ByVal Input checkpoint_prefixes, @ByVal Input destination_prefix, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, checkpoint_prefixes, destination_prefix, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input checkpoint_prefixes, @ByVal Input destination_prefix, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs DeleteOldDirs(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native MergeV2Checkpoints operation(Operation operation); } /** Reads and outputs the entire contents of the input filename. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The contents tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ReadFile extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ReadFile(Pointer p) { super(p); } public ReadFile(@Const @ByRef Scope scope, @ByVal Input filename) { super((Pointer)null); allocate(scope, filename); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input filename); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ReadFile operation(Operation operation); public native @ByRef Output contents(); public native ReadFile contents(Output contents); } /** Returns the number of records this Reader has produced. * * This is the same as the number of ReaderRead executions that have * succeeded. * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a Reader. * * Returns: * * {@code Output}: The records_produced tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderNumRecordsProduced extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReaderNumRecordsProduced(Pointer p) { super(p); } public ReaderNumRecordsProduced(@Const @ByRef Scope scope, @ByVal Input reader_handle) { super((Pointer)null); allocate(scope, reader_handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ReaderNumRecordsProduced operation(Operation operation); public native @ByRef Output records_produced(); public native ReaderNumRecordsProduced records_produced(Output records_produced); } /** Returns the number of work units this Reader has finished processing. * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a Reader. * * Returns: * * {@code Output}: The units_completed tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderNumWorkUnitsCompleted extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReaderNumWorkUnitsCompleted(Pointer p) { super(p); } public ReaderNumWorkUnitsCompleted(@Const @ByRef Scope scope, @ByVal Input reader_handle) { super((Pointer)null); allocate(scope, reader_handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ReaderNumWorkUnitsCompleted operation(Operation operation); public native @ByRef Output units_completed(); public native ReaderNumWorkUnitsCompleted units_completed(Output units_completed); } /** Returns up to {@code num_records} (key, value) pairs produced by a Reader. * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). * It may return less than {@code num_records} even before the last batch. * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a {@code Reader}. * * queue_handle: Handle to a {@code Queue}, with string work items. * * num_records: number of records to read from {@code Reader}. * * Returns: * * {@code Output} keys: A 1-D tensor. 
* * {@code Output} values: A 1-D tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderReadUpTo extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReaderReadUpTo(Pointer p) { super(p); } public ReaderReadUpTo(@Const @ByRef Scope scope, @ByVal Input reader_handle, @ByVal Input queue_handle, @ByVal Input num_records) { super((Pointer)null); allocate(scope, reader_handle, queue_handle, num_records); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle, @ByVal Input queue_handle, @ByVal Input num_records); public native @ByRef Operation operation(); public native ReaderReadUpTo operation(Operation operation); public native @ByRef Output keys(); public native ReaderReadUpTo keys(Output keys); public native @ByRef Output values(); public native ReaderReadUpTo values(Output values); } /** Returns the next record (key, value pair) produced by a Reader. * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a Reader. * * queue_handle: Handle to a Queue, with string work items. * * Returns: * * {@code Output} key: A scalar. * * {@code Output} value: A scalar. */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderRead extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReaderRead(Pointer p) { super(p); } public ReaderRead(@Const @ByRef Scope scope, @ByVal Input reader_handle, @ByVal Input queue_handle) { super((Pointer)null); allocate(scope, reader_handle, queue_handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle, @ByVal Input queue_handle); public native @ByRef Operation operation(); public native ReaderRead operation(Operation operation); public native @ByRef Output key(); public native ReaderRead key(Output key); public native @ByRef Output value(); public native ReaderRead value(Output value); } /** Restore a Reader to its initial clean state. * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a Reader. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderReset extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReaderReset(Pointer p) { super(p); } public ReaderReset(@Const @ByRef Scope scope, @ByVal Input reader_handle) { super((Pointer)null); allocate(scope, reader_handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native ReaderReset operation(Operation operation); } /** Restore a reader to a previously saved state. * * Not all Readers support being restored, so this can produce an * Unimplemented error. * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a Reader. * * state: Result of a ReaderSerializeState of a Reader with type * matching reader_handle. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderRestoreState extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ReaderRestoreState(Pointer p) { super(p); } public ReaderRestoreState(@Const @ByRef Scope scope, @ByVal Input reader_handle, @ByVal Input state) { super((Pointer)null); allocate(scope, reader_handle, state); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle, @ByVal Input state); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native ReaderRestoreState operation(Operation operation); } /** Produce a string tensor that encodes the state of a Reader. * * Not all Readers support being serialized, so this can produce an * Unimplemented error. * * Arguments: * * scope: A Scope object * * reader_handle: Handle to a Reader. * * Returns: * * {@code Output}: The state tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ReaderSerializeState extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReaderSerializeState(Pointer p) { super(p); } public ReaderSerializeState(@Const @ByRef Scope scope, @ByVal Input reader_handle) { super((Pointer)null); allocate(scope, reader_handle); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reader_handle); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ReaderSerializeState operation(Operation operation); public native @ByRef Output state(); public native ReaderSerializeState state(Output state); } /** Restores a tensor from checkpoint files. * * Reads a tensor stored in one or several files. If there are several files (for * instance because a tensor was saved as slices), {@code file_pattern} may contain * wildcard symbols ({@code *} and {@code ?}) in the filename portion only, not in the * directory portion. * * If a {@code file_pattern} matches several files, {@code preferred_shard} can be used to hint * in which file the requested tensor is likely to be found. This op will first * open the file at index {@code preferred_shard} in the list of matching files and try * to restore tensors from that file. Only if some tensors or tensor slices are * not found in that first file, then the Op opens all the files. Setting * {@code preferred_shard} to match the value passed as the {@code shard} input * of a matching {@code Save} Op may speed up Restore. This attribute only affects * performance, not correctness. The default value -1 means files are processed in * order. * * See also {@code RestoreSlice}. * * Arguments: * * scope: A Scope object * * file_pattern: Must have a single element. The pattern of the files from * which we read the tensor. * * tensor_name: Must have a single element. The name of the tensor to be * restored. * * dt: The type of the tensor to be restored. * * Optional attributes (see {@code Attrs}): * * preferred_shard: Index of file to open first if multiple files match * {@code file_pattern}. * * Returns: * * {@code Output}: The restored tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Restore extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Restore(Pointer p) { super(p); } /** Optional attribute setters for Restore */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. 
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Index of file to open first if multiple files match * {@code file_pattern}. * * Defaults to -1 */ public native @ByVal Attrs PreferredShard(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long preferred_shard_(); public native Attrs preferred_shard_(long preferred_shard_); } public Restore(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @Cast("tensorflow::DataType") int dt) { super((Pointer)null); allocate(scope, file_pattern, tensor_name, dt); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @Cast("tensorflow::DataType") int dt); public Restore(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @Cast("tensorflow::DataType") int dt, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, file_pattern, tensor_name, dt, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @Cast("tensorflow::DataType") int dt, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs PreferredShard(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native Restore operation(Operation operation); public native @ByRef Output tensor(); public native Restore tensor(Output tensor); } /** Restores a tensor from checkpoint files. * * This is like {@code Restore} except that restored tensor can be listed as filling * only a slice of a larger tensor. {@code shape_and_slice} specifies the shape of the * larger tensor and the slice that the restored tensor covers. * * The {@code shape_and_slice} input has the same format as the * elements of the {@code shapes_and_slices} input of the {@code SaveSlices} op. * * Arguments: * * scope: A Scope object * * file_pattern: Must have a single element. The pattern of the files from * which we read the tensor. * * tensor_name: Must have a single element. The name of the tensor to be * restored. * * shape_and_slice: Scalar. The shapes and slice specifications to use when * restoring a tensors. * * dt: The type of the tensor to be restored. * * Optional attributes (see {@code Attrs}): * * preferred_shard: Index of file to open first if multiple files match * {@code file_pattern}. See the documentation for {@code Restore}. * * Returns: * * {@code Output}: The restored tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class RestoreSlice extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RestoreSlice(Pointer p) { super(p); } /** Optional attribute setters for RestoreSlice */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Index of file to open first if multiple files match * {@code file_pattern}. See the documentation for {@code Restore}. * * Defaults to -1 */ public native @ByVal Attrs PreferredShard(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long preferred_shard_(); public native Attrs preferred_shard_(long preferred_shard_); } public RestoreSlice(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @ByVal Input shape_and_slice, @Cast("tensorflow::DataType") int dt) { super((Pointer)null); allocate(scope, file_pattern, tensor_name, shape_and_slice, dt); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @ByVal Input shape_and_slice, @Cast("tensorflow::DataType") int dt); public RestoreSlice(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @ByVal Input shape_and_slice, @Cast("tensorflow::DataType") int dt, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, file_pattern, tensor_name, shape_and_slice, dt, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input file_pattern, @ByVal Input tensor_name, @ByVal Input shape_and_slice, @Cast("tensorflow::DataType") int dt, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs PreferredShard(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native RestoreSlice operation(Operation operation); public native @ByRef Output tensor(); public native RestoreSlice tensor(Output tensor); } /** Restores tensors from a V2 checkpoint. * * For backward compatibility with the V1 format, this Op currently allows * restoring from a V1 checkpoint as well: * - This Op first attempts to find the V2 index file pointed to by "prefix", and * if found proceed to read it as a V2 checkpoint; * - Otherwise the V1 read path is invoked. * Relying on this behavior is not recommended, as the ability to fall back to read * V1 might be deprecated and eventually removed. * * By default, restores the named tensors in full. If the caller wishes to restore * specific slices of stored tensors, "shape_and_slices" should be non-empty * strings and correspondingly well-formed. * * Callers must ensure all the named tensors are indeed stored in the checkpoint. * * Arguments: * * scope: A Scope object * * prefix: Must have a single element. The prefix of a V2 checkpoint. * * tensor_names: shape {N}. The names of the tensors to be restored. * * shape_and_slices: shape {N}. The slice specs of the tensors to be restored. * Empty strings indicate that they are non-partitioned tensors. * * dtypes: shape {N}. The list of expected dtype for the tensors. Must match * those stored in the checkpoint. * * Returns: * * {@code OutputList}: shape {N}. The restored tensors, whose shapes are read from the * checkpoint directly. 
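 * 
 *  A minimal usage sketch with these bindings (an illustration, not generated API: it
 *  assumes {@code scope} is a valid {@code Scope}, that {@code prefix},
 *  {@code tensor_names} and {@code shape_and_slices} are {@code Input}s, and that
 *  {@code dtypes} is a {@code DataTypeVector} populated by the caller; all variable
 *  names here are illustrative):
 * 
 *  <pre>{@code java
 *  RestoreV2 restore = new RestoreV2(scope, prefix, tensor_names, shape_and_slices, dtypes);
 *  Output first = restore.get(0);  // operator[] access into the restored OutputList
 *  }</pre>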
*/ @Namespace("tensorflow::ops") @NoOffset public static class RestoreV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RestoreV2(Pointer p) { super(p); } public RestoreV2(@Const @ByRef Scope scope, @ByVal Input prefix, @ByVal Input tensor_names, @ByVal Input shape_and_slices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes) { super((Pointer)null); allocate(scope, prefix, tensor_names, shape_and_slices, dtypes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input prefix, @ByVal Input tensor_names, @ByVal Input shape_and_slices, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector dtypes); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public native @ByRef Operation operation(); public native RestoreV2 operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector tensors(); public native RestoreV2 tensors(OutputVector tensors); } /** Saves the input tensors to disk. * * The size of {@code tensor_names} must match the number of tensors in {@code data}. {@code data[i]} * is written to {@code filename} with name {@code tensor_names[i]}. * * See also {@code SaveSlices}. * * Arguments: * * scope: A Scope object * * filename: Must have a single element. The name of the file to which we write * the tensor. * * tensor_names: Shape {@code [N]}. The names of the tensors to be saved. * * data: {@code N} tensors to save. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class Save extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Save(Pointer p) { super(p); } public Save(@Const @ByRef Scope scope, @ByVal Input filename, @ByVal Input tensor_names, @ByVal InputList data) { super((Pointer)null); allocate(scope, filename, tensor_names, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input filename, @ByVal Input tensor_names, @ByVal InputList data); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native Save operation(Operation operation); } /** Saves input tensors slices to disk. * * This is like {@code Save} except that tensors can be listed in the saved file as being * a slice of a larger tensor. {@code shapes_and_slices} specifies the shape of the * larger tensor and the slice that this tensor covers. {@code shapes_and_slices} must * have as many elements as {@code tensor_names}. * * Elements of the {@code shapes_and_slices} input must either be: * * * The empty string, in which case the corresponding tensor is * saved normally. * * A string of the form {@code dim0 dim1 ... dimN-1 slice-spec} where the * {@code dimI} are the dimensions of the larger tensor and {@code slice-spec} * specifies what part is covered by the tensor to save. * * {@code slice-spec} itself is a {@code :}-separated list: {@code slice0:slice1:...:sliceN-1} * where each {@code sliceI} is either: * * * The string {@code -} meaning that the slice covers all indices of this dimension * * {@code start,length} where {@code start} and {@code length} are integers. In that * case the slice covers {@code length} indices starting at {@code start}. * * See also {@code Save}. * * Arguments: * * scope: A Scope object * * filename: Must have a single element. 
The name of the file to which we write the * tensor. * * tensor_names: Shape {@code [N]}. The names of the tensors to be saved. * * shapes_and_slices: Shape {@code [N]}. The shapes and slice specifications to use when * saving the tensors. * * data: {@code N} tensors to save. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class SaveSlices extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SaveSlices(Pointer p) { super(p); } public SaveSlices(@Const @ByRef Scope scope, @ByVal Input filename, @ByVal Input tensor_names, @ByVal Input shapes_and_slices, @ByVal InputList data) { super((Pointer)null); allocate(scope, filename, tensor_names, shapes_and_slices, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input filename, @ByVal Input tensor_names, @ByVal Input shapes_and_slices, @ByVal InputList data); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native SaveSlices operation(Operation operation); } /** Saves tensors in V2 checkpoint format. * * By default, saves the named tensors in full. If the caller wishes to save * specific slices of full tensors, "shape_and_slices" should be non-empty strings * and correspondingly well-formed. * * Arguments: * * scope: A Scope object * * prefix: Must have a single element. The prefix of the V2 checkpoint to which we * write the tensors. * * tensor_names: shape {N}. The names of the tensors to be saved. * * shape_and_slices: shape {N}. The slice specs of the tensors to be saved. * Empty strings indicate that they are non-partitioned tensors. * * tensors: {@code N} tensors to save. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class SaveV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SaveV2(Pointer p) { super(p); } public SaveV2(@Const @ByRef Scope scope, @ByVal Input prefix, @ByVal Input tensor_names, @ByVal Input shape_and_slices, @ByVal InputList tensors) { super((Pointer)null); allocate(scope, prefix, tensor_names, shape_and_slices, tensors); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input prefix, @ByVal Input tensor_names, @ByVal Input shape_and_slices, @ByVal InputList tensors); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native SaveV2 operation(Operation operation); } /** Generate a sharded filename. The filename is printf formatted as * * %s-%05d-of-%05d, basename, shard, num_shards. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The filename tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ShardedFilename extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ShardedFilename(Pointer p) { super(p); } public ShardedFilename(@Const @ByRef Scope scope, @ByVal Input basename, @ByVal Input shard, @ByVal Input num_shards) { super((Pointer)null); allocate(scope, basename, shard, num_shards); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input basename, @ByVal Input shard, @ByVal Input num_shards); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ShardedFilename operation(Operation operation); public native @ByRef Output filename(); public native ShardedFilename filename(Output filename); } /** Generate a glob pattern matching all sharded file names. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The filename tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ShardedFilespec extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ShardedFilespec(Pointer p) { super(p); } public ShardedFilespec(@Const @ByRef Scope scope, @ByVal Input basename, @ByVal Input num_shards) { super((Pointer)null); allocate(scope, basename, num_shards); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input basename, @ByVal Input num_shards); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ShardedFilespec operation(Operation operation); public native @ByRef Output filename(); public native ShardedFilespec filename(Output filename); } /** A Reader that outputs the records from a TensorFlow Records file. * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * container: If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Returns: * * {@code Output}: The handle to reference the Reader. */ @Namespace("tensorflow::ops") @NoOffset public static class TFRecordReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TFRecordReader(Pointer p) { super(p); } /** Optional attribute setters for TFRecordReader */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. 
* * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); /** Defaults to "" */ public native @ByVal Attrs CompressionType(@StringPiece BytePointer x); public native @ByVal Attrs CompressionType(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); public native @StringPiece BytePointer compression_type_(); public native Attrs compression_type_(BytePointer compression_type_); } public TFRecordReader(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public TFRecordReader(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, attrs); } private native void allocate(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public static native @ByVal Attrs CompressionType(@StringPiece BytePointer x); public static native @ByVal Attrs CompressionType(@StringPiece String x); public native @ByRef Operation operation(); public native TFRecordReader operation(Operation operation); public native @ByRef Output reader_handle(); public native TFRecordReader reader_handle(Output reader_handle); } /** A Reader that outputs the lines of a file delimited by '\n'. * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * skip_header_lines: Number of lines to skip from the beginning of every file. * * container: If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Returns: * * {@code Output}: The handle to reference the Reader. */ @Namespace("tensorflow::ops") @NoOffset public static class TextLineReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TextLineReader(Pointer p) { super(p); } /** Optional attribute setters for TextLineReader */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Number of lines to skip from the beginning of every file. * * Defaults to 0 */ /// public native @ByVal Attrs SkipHeaderLines(@Cast("tensorflow::int64") long x); /** If non-empty, this reader is placed in the given container. 
* Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @Cast("tensorflow::int64") long skip_header_lines_(); public native Attrs skip_header_lines_(long skip_header_lines_); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public TextLineReader(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public TextLineReader(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, attrs); } private native void allocate(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs SkipHeaderLines(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native TextLineReader operation(Operation operation); public native @ByRef Output reader_handle(); public native TextLineReader reader_handle(Output reader_handle); } /** A Reader that outputs the entire contents of a file as a value. * * To use, enqueue filenames in a Queue. The output of ReaderRead will * be a filename (key) and the contents of that file (value). * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * container: If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Returns: * * {@code Output}: The handle to reference the Reader. */ @Namespace("tensorflow::ops") @NoOffset public static class WholeFileReader extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public WholeFileReader(Pointer p) { super(p); } /** Optional attribute setters for WholeFileReader */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If non-empty, this reader is placed in the given container. 
* Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public WholeFileReader(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public WholeFileReader(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, attrs); } private native void allocate(@Const @ByRef Scope scope, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native WholeFileReader operation(Operation operation); public native @ByRef Output reader_handle(); public native WholeFileReader reader_handle(Output reader_handle); } /** Writes contents to the file at input filename. Creates the file and recursively * * creates the directory if it does not exist. * * Arguments: * * scope: A Scope object * * filename: scalar. The name of the file to which we write the contents. * * contents: scalar. The content to be written to the output file. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class WriteFile extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public WriteFile(Pointer p) { super(p); } public WriteFile(@Const @ByRef Scope scope, @ByVal Input filename, @ByVal Input contents) { super((Pointer)null); allocate(scope, filename, contents); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input filename, @ByVal Input contents); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native WriteFile operation(Operation operation); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_IO_OPS_H_ // Parsed from tensorflow/cc/ops/linalg_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_LINALG_OPS_H_ // #define TENSORFLOW_CC_OPS_LINALG_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup linalg_ops Linalg Ops * \{

* Computes the Cholesky decomposition of one or more square matrices. * * The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. * * The input has to be symmetric and positive definite. Only the lower-triangular * part of the input will be used for this operation. The upper-triangular part * will not be read. * * The output is a tensor of the same shape as the input * containing the Cholesky decompositions for all input submatrices {@code [..., :, :]}. * * **Note**: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. * * Arguments: * * scope: A Scope object * * input: Shape is {@code [..., M, M]}. * * Returns: * * {@code Output}: Shape is {@code [..., M, M]}. */ @Namespace("tensorflow::ops") @NoOffset public static class Cholesky extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Cholesky(Pointer p) { super(p); } public Cholesky(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Cholesky operation(Operation operation); public native @ByRef Output output(); public native Cholesky output(Output output); } /** Computes the reverse mode backpropagated gradient of the Cholesky algorithm. * * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. * * Arguments: * * scope: A Scope object * * l: Output of batch Cholesky algorithm l = cholesky(A). Shape is {@code [..., M, M]}. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. * * grad: df/dl where f is some scalar function. Shape is {@code [..., M, M]}. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. * * Returns: * * {@code Output}: Symmetrized version of df/dA . Shape is {@code [..., M, M]} */ @Namespace("tensorflow::ops") @NoOffset public static class CholeskyGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CholeskyGrad(Pointer p) { super(p); } public CholeskyGrad(@Const @ByRef Scope scope, @ByVal Input l, @ByVal Input grad) { super((Pointer)null); allocate(scope, l, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input l, @ByVal Input grad); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native CholeskyGrad operation(Operation operation); public native @ByRef Output output(); public native CholeskyGrad output(Output output); } /** Computes the sign and the log of the absolute value of the determinant of * * one or more square matrices. * * The input is a tensor of shape {@code [N, M, M]} whose inner-most 2 dimensions * form square matrices. 
The outputs are two tensors containing the signs and * absolute values of the log determinants for all N input submatrices * {@code [..., :, :]} such that the determinant = sign*exp(log_abs_determinant). * The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU * is the LU decomposition of the input and P is the corresponding * permutation matrix. * * Arguments: * * scope: A Scope object * * input: Shape is {@code [N, M, M]}. * * Returns: * * {@code Output} sign: The signs of the log determinants of the inputs. Shape is {@code [N]}. * * {@code Output} log_abs_determinant: The logs of the absolute values of the determinants * of the N input matrices. Shape is {@code [N]}. */ @Namespace("tensorflow::ops") @NoOffset public static class LogMatrixDeterminant extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogMatrixDeterminant(Pointer p) { super(p); } public LogMatrixDeterminant(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByRef Operation operation(); public native LogMatrixDeterminant operation(Operation operation); public native @ByRef Output sign(); public native LogMatrixDeterminant sign(Output sign); public native @ByRef Output log_abs_determinant(); public native LogMatrixDeterminant log_abs_determinant(Output log_abs_determinant); } /** Computes the determinant of one or more square matrices. * * The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants * for all input submatrices {@code [..., :, :]}. * * Arguments: * * scope: A Scope object * * input: Shape is {@code [..., M, M]}. * * Returns: * * {@code Output}: Shape is {@code [...]}. */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixDeterminant extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixDeterminant(Pointer p) { super(p); } public MatrixDeterminant(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native MatrixDeterminant operation(Operation operation); public native @ByRef Output output(); public native MatrixDeterminant output(Output output); } /** Computes the inverse of one or more square invertible matrices or their * * adjoints (conjugate transposes). * * The input is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the inverse for all input submatrices {@code [..., :, :]}. * * The op uses LU decomposition with partial pivoting to compute the inverses. * * If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. * * Arguments: * * scope: A Scope object * * input: Shape is {@code [..., M, M]}. * * Returns: * * {@code Output}: Shape is {@code [..., M, M]}. 
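 * 
 *  For example, a minimal sketch (assuming {@code scope} is a valid {@code Scope} and
 *  {@code a} is an {@code Input} wrapping a batch of square matrices; the variable
 *  names are illustrative):
 * 
 *  <pre>{@code java
 *  MatrixInverse inv = new MatrixInverse(scope, a, MatrixInverse.Adjoint(false));
 *  Output aInv = inv.output();  // same shape as the input, [..., M, M]
 *  }</pre>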
* * \compatibility(numpy) * Equivalent to np.linalg.inv * \end_compatibility */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixInverse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixInverse(Pointer p) { super(p); } /** Optional attribute setters for MatrixInverse */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to false */ public native @ByVal Attrs Adjoint(@Cast("bool") boolean x); public native @Cast("bool") boolean adjoint_(); public native Attrs adjoint_(boolean adjoint_); } public MatrixInverse(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public MatrixInverse(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Adjoint(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native MatrixInverse operation(Operation operation); public native @ByRef Output output(); public native MatrixInverse output(Output output); } /** Solves systems of linear equations. * * {@code Matrix} is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions * form square matrices. {@code Rhs} is a tensor of shape {@code [..., M, K]}. The {@code output} is * a tensor shape {@code [..., M, K]}. If {@code adjoint} is {@code False} then each output matrix * satisfies {@code matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]}. * If {@code adjoint} is {@code True} then each output matrix satisfies * {@code adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]}. * * Arguments: * * scope: A Scope object * * matrix: Shape is {@code [..., M, M]}. * * rhs: Shape is {@code [..., M, K]}. * * Optional attributes (see {@code Attrs}): * * adjoint: Boolean indicating whether to solve with {@code matrix} or its (block-wise) * adjoint. * * Returns: * * {@code Output}: Shape is {@code [..., M, K]}. */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixSolve extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixSolve(Pointer p) { super(p); } /** Optional attribute setters for MatrixSolve */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Boolean indicating whether to solve with {@code matrix} or its (block-wise) * adjoint. * * Defaults to false */ public native @ByVal Attrs Adjoint(@Cast("bool") boolean x); public native @Cast("bool") boolean adjoint_(); public native Attrs adjoint_(boolean adjoint_); } public MatrixSolve(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs) { super((Pointer)null); allocate(scope, matrix, rhs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs); public MatrixSolve(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, matrix, rhs, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Adjoint(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native MatrixSolve operation(Operation operation); public native @ByRef Output output(); public native MatrixSolve output(Output output); } /** Solves one or more linear least-squares problems. * * {@code matrix} is a tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form real or complex matrices of size {@code [M, N]}. {@code Rhs} is a tensor of the same * type as {@code matrix} and shape {@code [..., M, K]}. * The output is a tensor shape {@code [..., N, K]} where each output matrix solves * each of the equations * {@code matrix[..., :, :]} * {@code output[..., :, :]} = {@code rhs[..., :, :]} * in the least squares sense. * * We use the following notation for (complex) matrix and right-hand sides * in the batch: * * {@code matrix}=\\(A \in \mathbb{C}^{m \times n}\\), * {@code rhs}=\\(B \in \mathbb{C}^{m \times k}\\), * {@code output}=\\(X \in \mathbb{C}^{n \times k}\\), * {@code l2_regularizer}=\\(\lambda \in \mathbb{R}\\). * * If {@code fast} is {@code True}, then the solution is computed by solving the normal * equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then * \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares * problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\). * If \\(m \lt n\\) then {@code output} is computed as * \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the * minimum-norm solution to the under-determined linear system, i.e. * \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), * subject to \\(A Z = B\\). Notice that the fast path is only numerically stable * when \\(A\\) is numerically full rank and has a condition number * \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is * sufficiently large. * * If {@code fast} is {@code False} an algorithm based on the numerically robust complete * orthogonal decomposition is used. This computes the minimum-norm * least-squares solution, even when \\(A\\) is rank deficient. This path is * typically 6-7 times slower than the fast path. If {@code fast} is {@code False} then * {@code l2_regularizer} is ignored. 
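 * 
 *  As a sketch, a regularized fast-path solve could be wired up as follows (assuming
 *  {@code scope}, {@code matrix}, {@code rhs} and a scalar {@code Input} {@code l2}
 *  prepared by the caller; the names are illustrative):
 * 
 *  <pre>{@code java
 *  MatrixSolveLs solve = new MatrixSolveLs(scope, matrix, rhs, l2,
 *                                          MatrixSolveLs.Fast(true));
 *  Output x = solve.output();  // least-squares solution, shape [..., N, K]
 *  }</pre>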
* * Arguments: * * scope: A Scope object * * matrix: Shape is {@code [..., M, N]}. * * rhs: Shape is {@code [..., M, K]}. * * l2_regularizer: Scalar tensor. * * \compatibility(numpy) * Equivalent to np.linalg.lstsq * \end_compatibility * * Returns: * * {@code Output}: Shape is {@code [..., N, K]}. */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixSolveLs extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixSolveLs(Pointer p) { super(p); } /** Optional attribute setters for MatrixSolveLs */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to true */ public native @ByVal Attrs Fast(@Cast("bool") boolean x); public native @Cast("bool") boolean fast_(); public native Attrs fast_(boolean fast_); } public MatrixSolveLs(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @ByVal Input l2_regularizer) { super((Pointer)null); allocate(scope, matrix, rhs, l2_regularizer); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @ByVal Input l2_regularizer); public MatrixSolveLs(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @ByVal Input l2_regularizer, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, matrix, rhs, l2_regularizer, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @ByVal Input l2_regularizer, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Fast(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native MatrixSolveLs operation(Operation operation); public native @ByRef Output output(); public native MatrixSolveLs output(Output output); } /** Solves systems of linear equations with upper or lower triangular matrices by * * backsubstitution. * * {@code matrix} is a tensor of shape {@code [..., M, M]} whose inner-most 2 dimensions form * square matrices. If {@code lower} is {@code True} then the strictly upper triangular part * of each inner-most matrix is assumed to be zero and not accessed. * If {@code lower} is {@code False} then the strictly lower triangular part of each inner-most * matrix is assumed to be zero and not accessed. * {@code rhs} is a tensor of shape {@code [..., M, K]}. * * The output is a tensor of shape {@code [..., M, K]}. If {@code adjoint} is * {@code False} then the innermost matrices in {@code output} satisfy matrix equations * {@code matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]}. * If {@code adjoint} is {@code True} then the innermost matrices in * {@code output} satisfy matrix equations * {@code adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]}. * * Arguments: * * scope: A Scope object * * matrix: Shape is {@code [..., M, M]}. 
* * rhs: Shape is {@code [..., M, K]}. * * Optional attributes (see {@code Attrs}): * * lower: Boolean indicating whether the innermost matrices in {@code matrix} are * lower or upper triangular. * * adjoint: Boolean indicating whether to solve with {@code matrix} or its (block-wise) * adjoint. * * \compatibility(numpy) * Equivalent to scipy.linalg.solve_triangular * \end_compatibility * * Returns: * * {@code Output}: Shape is {@code [..., M, K]}. */ @Namespace("tensorflow::ops") @NoOffset public static class MatrixTriangularSolve extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatrixTriangularSolve(Pointer p) { super(p); } /** Optional attribute setters for MatrixTriangularSolve */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Boolean indicating whether the innermost matrices in {@code matrix} are * lower or upper triangular. * * Defaults to true */ /// /// public native @ByVal Attrs Lower(@Cast("bool") boolean x); /** Boolean indicating whether to solve with {@code matrix} or its (block-wise) * adjoint. * * \compatibility(numpy) * Equivalent to scipy.linalg.solve_triangular * \end_compatibility * * Defaults to false */ public native @ByVal Attrs Adjoint(@Cast("bool") boolean x); public native @Cast("bool") boolean lower_(); public native Attrs lower_(boolean lower_); public native @Cast("bool") boolean adjoint_(); public native Attrs adjoint_(boolean adjoint_); } public MatrixTriangularSolve(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs) { super((Pointer)null); allocate(scope, matrix, rhs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs); public MatrixTriangularSolve(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, matrix, rhs, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input matrix, @ByVal Input rhs, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Lower(@Cast("bool") boolean x); public static native @ByVal Attrs Adjoint(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native MatrixTriangularSolve operation(Operation operation); public native @ByRef Output output(); public native MatrixTriangularSolve output(Output output); } /** Computes the QR decompositions of one or more matrices. * * Computes the QR decomposition of each inner matrix in {@code tensor} such that * {@code tensor[..., :, :] = q[..., :, :] * r[..., :, :]} * *

 *  <pre>{@code python
 *  # a is a tensor.
 *  # q is a tensor of orthonormal matrices.
 *  # r is a tensor of upper triangular matrices.
 *  q, r = qr(a)
 *  q_full, r_full = qr(a, full_matrices=True)
 *  }</pre>
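 * 
 *  An equivalent construction through these bindings might look like the following
 *  sketch (assuming {@code scope} is a valid {@code Scope} and {@code a} is an
 *  {@code Input} holding the matrix {@code a} above):
 * 
 *  <pre>{@code java
 *  Qr qr = new Qr(scope, a);                             // leading P columns of q
 *  Qr qrFull = new Qr(scope, a, Qr.FullMatrices(true));  // full-sized q and r
 *  Output q = qr.q(), r = qr.r();
 *  }</pre>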
* * Arguments: * * scope: A Scope object * * input: A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}. * * Optional attributes (see {@code Attrs}): * * full_matrices: If true, compute full-sized {@code q} and {@code r}. If false * (the default), compute only the leading {@code P} columns of {@code q}. * * Returns: * * {@code Output} q: Orthonormal basis for range of {@code a}. If {@code full_matrices} is {@code False} then * shape is {@code [..., M, P]}; if {@code full_matrices} is {@code True} then shape is * {@code [..., M, M]}. * * {@code Output} r: Triangular factor. If {@code full_matrices} is {@code False} then shape is * {@code [..., P, N]}. If {@code full_matrices} is {@code True} then shape is {@code [..., M, N]}. */ @Namespace("tensorflow::ops") @NoOffset public static class Qr extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Qr(Pointer p) { super(p); } /** Optional attribute setters for Qr */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, compute full-sized {@code q} and {@code r}. If false * (the default), compute only the leading {@code P} columns of {@code q}. * * Defaults to false */ public native @ByVal Attrs FullMatrices(@Cast("bool") boolean x); public native @Cast("bool") boolean full_matrices_(); public native Attrs full_matrices_(boolean full_matrices_); } public Qr(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public Qr(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public static native @ByVal Attrs FullMatrices(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Qr operation(Operation operation); public native @ByRef Output q(); public native Qr q(Output q); public native @ByRef Output r(); public native Qr r(Output r); } /** Computes the eigen decomposition of one or more square self-adjoint matrices. * * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in * {@code input} such that {@code input[..., :, :] = v[..., :, :] * diag(e[..., :])}. The eigenvalues * are sorted in non-decreasing order. * *
 *  <pre>{@code python
 *  # a is a tensor.
 *  # e is a tensor of eigenvalues.
 *  # v is a tensor of eigenvectors.
 *  e, v = self_adjoint_eig(a)
 *  e = self_adjoint_eig(a, compute_v=False)
 *  }</pre>
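 * 
 *  A corresponding sketch with these bindings (assuming {@code scope} and a
 *  self-adjoint {@code Input} {@code a} as above):
 * 
 *  <pre>{@code java
 *  SelfAdjointEig eig = new SelfAdjointEig(scope, a);  // eigenvalues and eigenvectors
 *  SelfAdjointEig eigenvaluesOnly =
 *      new SelfAdjointEig(scope, a, SelfAdjointEig.ComputeV(false));
 *  Output e = eig.e(), v = eig.v();
 *  }</pre>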
* * Arguments: * * scope: A Scope object * * input: {@code Tensor} input of shape {@code [N, N]}. * * Optional attributes (see {@code Attrs}): * * compute_v: If {@code True} then eigenvectors will be computed and returned in {@code v}. * Otherwise, only the eigenvalues will be computed. * * Returns: * * {@code Output} e: Eigenvalues. Shape is {@code [N]}. * * {@code Output} v: Eigenvectors. Shape is {@code [N, N]}. */ @Namespace("tensorflow::ops") @NoOffset public static class SelfAdjointEig extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SelfAdjointEig(Pointer p) { super(p); } /** Optional attribute setters for SelfAdjointEig */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True} then eigenvectors will be computed and returned in {@code v}. * Otherwise, only the eigenvalues will be computed. * * Defaults to true */ public native @ByVal Attrs ComputeV(@Cast("bool") boolean x); public native @Cast("bool") boolean compute_v_(); public native Attrs compute_v_(boolean compute_v_); } public SelfAdjointEig(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public SelfAdjointEig(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public static native @ByVal Attrs ComputeV(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SelfAdjointEig operation(Operation operation); public native @ByRef Output e(); public native SelfAdjointEig e(Output e); public native @ByRef Output v(); public native SelfAdjointEig v(Output v); } /** Computes the singular value decompositions of one or more matrices. * * Computes the SVD of each inner matrix in {@code input} such that * {@code input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])} * *
 *  <pre>{@code python
 *  # a is a tensor containing a batch of matrices.
 *  # s is a tensor of singular values for each matrix.
 *  # u is the tensor containing the left singular vectors for each matrix.
 *  # v is the tensor containing the right singular vectors for each matrix.
 *  s, u, v = svd(a)
 *  s, _, _ = svd(a, compute_uv=False)
 *  }</pre>
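 * 
 *  A corresponding sketch with these bindings (assuming {@code scope} and an
 *  {@code Input} {@code a} holding a batch of matrices, as above):
 * 
 *  <pre>{@code java
 *  Svd svd = new Svd(scope, a);                               // s, u and v
 *  Svd valuesOnly = new Svd(scope, a, Svd.ComputeUv(false));  // singular values only
 *  Output s = svd.s(), u = svd.u(), v = svd.v();
 *  }</pre>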
* * Arguments: * * scope: A Scope object * * input: A tensor of shape {@code [..., M, N]} whose inner-most 2 dimensions * form matrices of size {@code [M, N]}. Let {@code P} be the minimum of {@code M} and {@code N}. * * Optional attributes (see {@code Attrs}): * * compute_uv: If true, left and right singular vectors will be * computed and returned in {@code u} and {@code v}, respectively. * If false, {@code u} and {@code v} are not set and should never be referenced. * * full_matrices: If true, compute full-sized {@code u} and {@code v}. If false * (the default), compute only the leading {@code P} singular vectors. * Ignored if {@code compute_uv} is {@code False}. * * Returns: * * {@code Output} s: Singular values. Shape is {@code [..., P]}. * * {@code Output} u: Left singular vectors. If {@code full_matrices} is {@code False} then shape is * {@code [..., M, P]}; if {@code full_matrices} is {@code True} then shape is * {@code [..., M, M]}. Undefined if {@code compute_uv} is {@code False}. * * {@code Output} v: Right singular vectors. If {@code full_matrices} is {@code False} then shape is * {@code [..., N, P]}. If {@code full_matrices} is {@code True} then shape is {@code [..., N, N]}. * Undefined if {@code compute_uv} is false. */ @Namespace("tensorflow::ops") @NoOffset public static class Svd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Svd(Pointer p) { super(p); } /** Optional attribute setters for Svd */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, left and right singular vectors will be * computed and returned in {@code u} and {@code v}, respectively. * If false, {@code u} and {@code v} are not set and should never be referenced. * * Defaults to true */ /// public native @ByVal Attrs ComputeUv(@Cast("bool") boolean x); /** If true, compute full-sized {@code u} and {@code v}. If false * (the default), compute only the leading {@code P} singular vectors. * Ignored if {@code compute_uv} is {@code False}. 
* * Defaults to false */ public native @ByVal Attrs FullMatrices(@Cast("bool") boolean x); public native @Cast("bool") boolean compute_uv_(); public native Attrs compute_uv_(boolean compute_uv_); public native @Cast("bool") boolean full_matrices_(); public native Attrs full_matrices_(boolean full_matrices_); } public Svd(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public Svd(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public static native @ByVal Attrs ComputeUv(@Cast("bool") boolean x); public static native @ByVal Attrs FullMatrices(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Svd operation(Operation operation); public native @ByRef Output s(); public native Svd s(Output s); public native @ByRef Output u(); public native Svd u(Output u); public native @ByRef Output v(); public native Svd v(Output v); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_LINALG_OPS_H_ // Parsed from tensorflow/cc/ops/logging_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_LOGGING_OPS_H_ // #define TENSORFLOW_CC_OPS_LOGGING_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup logging_ops Logging Ops * \{

* Asserts that the given condition is true. * * If {@code condition} evaluates to false, print the list of tensors in {@code data}. * {@code summarize} determines how many entries of the tensors to print. * * Arguments: * * scope: A Scope object * * condition: The condition to evaluate. * * data: The tensors to print out when condition is false. * * Optional attributes (see {@code Attrs}): * * summarize: Print this many entries of each tensor. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class Assert extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Assert(Pointer p) { super(p); } /** Optional attribute setters for Assert */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Print this many entries of each tensor. * * Defaults to 3 */ public native @ByVal Attrs Summarize(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long summarize_(); public native Attrs summarize_(long summarize_); } public Assert(@Const @ByRef Scope scope, @ByVal Input condition, @ByVal InputList data) { super((Pointer)null); allocate(scope, condition, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input condition, @ByVal InputList data); public Assert(@Const @ByRef Scope scope, @ByVal Input condition, @ByVal InputList data, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, condition, data, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input condition, @ByVal InputList data, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs Summarize(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native Assert operation(Operation operation); } /** Outputs a {@code Summary} protocol buffer with audio. * * The summary has up to {@code max_outputs} summary values containing audio. The * audio is built from {@code tensor} which must be 3-D with shape {@code [batch_size, * frames, channels]} or 2-D with shape {@code [batch_size, frames]}. The values are * assumed to be in the range of {@code [-1.0, 1.0]} with a sample rate of {@code sample_rate}. * * The {@code tag} argument is a scalar {@code Tensor} of type {@code string}. It is used to * build the {@code tag} of the summary values: * * * If {@code max_outputs} is 1, the summary value tag is '*tag* /audio'. * * If {@code max_outputs} is greater than 1, the summary value tags are * generated sequentially as '*tag* /audio/0', '*tag* /audio/1', etc. * * Arguments: * * scope: A Scope object * * tag: Scalar. Used to build the {@code tag} attribute of the summary values. * * tensor: 2-D of shape {@code [batch_size, frames]}. * * sample_rate: The sample rate of the signal in hertz. * * Optional attributes (see {@code Attrs}): * * max_outputs: Max number of batch elements to generate audio for. 
* * Returns: * * {@code Output}: Scalar. Serialized {@code Summary} protocol buffer. */ @Namespace("tensorflow::ops") @NoOffset public static class AudioSummary extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AudioSummary(Pointer p) { super(p); } /** Optional attribute setters for AudioSummary */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Max number of batch elements to generate audio for. * * Defaults to 3 */ public native @ByVal Attrs MaxOutputs(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long max_outputs_(); public native Attrs max_outputs_(long max_outputs_); } public AudioSummary(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @ByVal Input sample_rate) { super((Pointer)null); allocate(scope, tag, tensor, sample_rate); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @ByVal Input sample_rate); public AudioSummary(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @ByVal Input sample_rate, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, tag, tensor, sample_rate, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @ByVal Input sample_rate, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs MaxOutputs(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native AudioSummary operation(Operation operation); public native @ByRef Output summary(); public native AudioSummary summary(Output summary); } /** Outputs a {@code Summary} protocol buffer with a histogram. * * The generated * [{@code Summary}](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * has one summary value containing a histogram for {@code values}. * * This op reports an {@code InvalidArgument} error if any value is not finite. * * Arguments: * * scope: A Scope object * * tag: Scalar. Tag to use for the {@code Summary.Value}. * * values: Any shape. Values to use to build the histogram. * * Returns: * * {@code Output}: Scalar. Serialized {@code Summary} protocol buffer. */ @Namespace("tensorflow::ops") @NoOffset public static class HistogramSummary extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public HistogramSummary(Pointer p) { super(p); } public HistogramSummary(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input values) { super((Pointer)null); allocate(scope, tag, values); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input values); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native HistogramSummary operation(Operation operation); public native @ByRef Output summary(); public native HistogramSummary summary(Output summary); } /** Outputs a {@code Summary} protocol buffer with images. * * The summary has up to {@code max_images} summary values containing images. The * images are built from {@code tensor} which must be 4-D with shape {@code [batch_size, * height, width, channels]} and where {@code channels} can be: * * * 1: {@code tensor} is interpreted as Grayscale. * * 3: {@code tensor} is interpreted as RGB. * * 4: {@code tensor} is interpreted as RGBA. * * The images have the same number of channels as the input tensor. For float * input, the values are normalized one image at a time to fit in the range * {@code [0, 255]}. {@code uint8} values are unchanged. The op uses two different * normalization algorithms: * * * If the input values are all positive, they are rescaled so the largest one * is 255. * * * If any input value is negative, the values are shifted so input value 0.0 * is at 127. They are then rescaled so that either the smallest value is 0, * or the largest one is 255. * * The {@code tag} argument is a scalar {@code Tensor} of type {@code string}. It is used to * build the {@code tag} of the summary values: * * * If {@code max_images} is 1, the summary value tag is '*tag* /image'. * * If {@code max_images} is greater than 1, the summary value tags are * generated sequentially as '*tag* /image/0', '*tag* /image/1', etc. * * The {@code bad_color} argument is the color to use in the generated images for * non-finite input values. It is a {@code uint8} 1-D tensor of length {@code channels}. * Each element must be in the range {@code [0, 255]} (It represents the value of a * pixel in the output image). Non-finite values in the input tensor are * replaced by this tensor in the output image. The default value is the color * red. * * Arguments: * * scope: A Scope object * * tag: Scalar. Used to build the {@code tag} attribute of the summary values. * * tensor: 4-D of shape {@code [batch_size, height, width, channels]} where * {@code channels} is 1, 3, or 4. * * Optional attributes (see {@code Attrs}): * * max_images: Max number of batch elements to generate images for. * * bad_color: Color to use for pixels with non-finite values. * * Returns: * * {@code Output}: Scalar. Serialized {@code Summary} protocol buffer. */ @Namespace("tensorflow::ops") @NoOffset public static class ImageSummary extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ImageSummary(Pointer p) { super(p); } /** Optional attribute setters for ImageSummary */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Max number of batch elements to generate images for. * * Defaults to 3 */ /// public native @ByVal Attrs MaxImages(@Cast("tensorflow::int64") long x); /** Color to use for pixels with non-finite values. * * Defaults to Tensor */ public native @ByVal Attrs BadColor(@Const @ByRef TensorProto x); public native @Cast("tensorflow::int64") long max_images_(); public native Attrs max_images_(long max_images_); public native @ByRef TensorProto bad_color_(); public native Attrs bad_color_(TensorProto bad_color_); } public ImageSummary(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor) { super((Pointer)null); allocate(scope, tag, tensor); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor); public ImageSummary(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, tag, tensor, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs MaxImages(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs BadColor(@Const @ByRef TensorProto x); public native @ByRef Operation operation(); public native ImageSummary operation(Operation operation); public native @ByRef Output summary(); public native ImageSummary summary(Output summary); } /** Merges summaries. * * This op creates a * [{@code Summary}](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * protocol buffer that contains the union of all the values in the input * summaries. * * When the Op is run, it reports an {@code InvalidArgument} error if multiple values * in the summaries to merge use the same tag. * * Arguments: * * scope: A Scope object * * inputs: Can be of any shape. Each must contain serialized {@code Summary} protocol * buffers. * * Returns: * * {@code Output}: Scalar. Serialized {@code Summary} protocol buffer. */ @Namespace("tensorflow::ops") @NoOffset public static class MergeSummary extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MergeSummary(Pointer p) { super(p); } public MergeSummary(@Const @ByRef Scope scope, @ByVal InputList inputs) { super((Pointer)null); allocate(scope, inputs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native MergeSummary operation(Operation operation); public native @ByRef Output summary(); public native MergeSummary summary(Output summary); } /** Prints a list of tensors. * * Passes {@code input} through to {@code output} and prints {@code data} when evaluating. * * Arguments: * * scope: A Scope object * * input: The tensor passed to {@code output} * * data: A list of tensors to print out when op is evaluated. 
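 *
 *  A minimal usage sketch, assuming a {@code Scope} {@code scope}, an {@code Input}
 *  {@code input}, and an {@code InputList} {@code data} created elsewhere:
 *  {@code
 *  // Message and Summarize are the static Attrs setters declared on Print below;
 *  // the message text is an arbitrary example.
 *  Print print = new Print(scope, input, data, Print.Message("debug: ").Summarize(3));
 *  Output passedThrough = print.output();  // carries the same values as input
 *  }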
* * Optional attributes (see {@code Attrs}): * * message: A string, prefix of the error message. * * first_n: Only log {@code first_n} number of times. -1 disables logging. * * summarize: Only print this many entries of each tensor. * * Returns: * * {@code Output}: = The unmodified {@code input} tensor */ @Namespace("tensorflow::ops") @NoOffset public static class Print extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Print(Pointer p) { super(p); } /** Optional attribute setters for Print */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string, prefix of the error message. * * Defaults to "" */ /// public native @ByVal Attrs Message(@StringPiece BytePointer x); public native @ByVal Attrs Message(@StringPiece String x); /** Only log {@code first_n} number of times. -1 disables logging. * * Defaults to -1 */ /// public native @ByVal Attrs FirstN(@Cast("tensorflow::int64") long x); /** Only print this many entries of each tensor. * * Defaults to 3 */ public native @ByVal Attrs Summarize(@Cast("tensorflow::int64") long x); public native @StringPiece BytePointer message_(); public native Attrs message_(BytePointer message_); public native @Cast("tensorflow::int64") long first_n_(); public native Attrs first_n_(long first_n_); public native @Cast("tensorflow::int64") long summarize_(); public native Attrs summarize_(long summarize_); } public Print(@Const @ByRef Scope scope, @ByVal Input input, @ByVal InputList data) { super((Pointer)null); allocate(scope, input, data); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal InputList data); public Print(@Const @ByRef Scope scope, @ByVal Input input, @ByVal InputList data, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, data, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal InputList data, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Message(@StringPiece BytePointer x); public static native @ByVal Attrs Message(@StringPiece String x); public static native @ByVal Attrs FirstN(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Summarize(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native Print operation(Operation operation); public native @ByRef Output output(); public native Print output(Output output); } /** Prints a string scalar. * * Prints a string scalar to the desired output_stream. * * Arguments: * * scope: A Scope object * * input: The string scalar to print. * * Optional attributes (see {@code Attrs}): * * output_stream: A string specifying the output stream or logging level to print to. 
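 *
 *  A minimal usage sketch, assuming a {@code Scope} {@code scope} and a string-scalar
 *  {@code Input} {@code message} created elsewhere ("stderr" is the default stream;
 *  "stdout" is another value TensorFlow accepts):
 *  {@code
 *  PrintV2 print = new PrintV2(scope, message, PrintV2.OutputStream("stdout"));
 *  Operation op = print.asOperation();  // run this operation to emit the string
 *  }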
* * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class PrintV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PrintV2(Pointer p) { super(p); } /** Optional attribute setters for PrintV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string specifying the output stream or logging level to print to. * * Defaults to "stderr" */ public native @ByVal Attrs OutputStream(@StringPiece BytePointer x); public native @ByVal Attrs OutputStream(@StringPiece String x); public native @StringPiece BytePointer output_stream_(); public native Attrs output_stream_(BytePointer output_stream_); } public PrintV2(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public PrintV2(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs OutputStream(@StringPiece BytePointer x); public static native @ByVal Attrs OutputStream(@StringPiece String x); public native @ByRef Operation operation(); public native PrintV2 operation(Operation operation); } /** Outputs a {@code Summary} protocol buffer with scalar values. * * The input {@code tags} and {@code values} must have the same shape. The generated summary * has a summary value for each tag-value pair in {@code tags} and {@code values}. * * Arguments: * * scope: A Scope object * * tags: Tags for the summary. * * values: Same shape as {@code tags}. Values for the summary. * * Returns: * * {@code Output}: Scalar. Serialized {@code Summary} protocol buffer. */ @Namespace("tensorflow::ops") @NoOffset public static class ScalarSummary extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScalarSummary(Pointer p) { super(p); } public ScalarSummary(@Const @ByRef Scope scope, @ByVal Input tags, @ByVal Input values) { super((Pointer)null); allocate(scope, tags, values); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tags, @ByVal Input values); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ScalarSummary operation(Operation operation); public native @ByRef Output summary(); public native ScalarSummary summary(Output summary); } /** Outputs a {@code Summary} protocol buffer with a tensor.
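 *
 *  A minimal usage sketch, assuming a {@code Scope} {@code scope} and an {@code Input}
 *  {@code t} created elsewhere:
 *  {@code
 *  TensorSummary summary = new TensorSummary(scope, t);
 *  Output serialized = summary.summary();
 *  }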
* * This op is being phased out in favor of TensorSummaryV2, which lets callers pass * a tag as well as a serialized SummaryMetadata proto string that contains * plugin-specific data. We will keep this op to maintain backwards compatibility. * * Arguments: * * scope: A Scope object * * tensor: A tensor to serialize. * * Optional attributes (see {@code Attrs}): * * description: A json-encoded SummaryDescription proto. * * labels: An unused list of strings. * * display_name: An unused string. * * Returns: * * {@code Output}: The summary tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorSummary extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSummary(Pointer p) { super(p); } /** Optional attribute setters for TensorSummary */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A json-encoded SummaryDescription proto. * * Defaults to "" */ /// public native @ByVal Attrs Description(@StringPiece BytePointer x); public native @ByVal Attrs Description(@StringPiece String x); /** An unused list of strings. * * Defaults to [] */ /// public native @ByVal Attrs Labels(@Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector x); /** An unused string. 
* * Defaults to "" */ public native @ByVal Attrs DisplayName(@StringPiece BytePointer x); public native @ByVal Attrs DisplayName(@StringPiece String x); public native @StringPiece BytePointer description_(); public native Attrs description_(BytePointer description_); public native @ByRef @Cast("tensorflow::gtl::ArraySlice*") StringVector labels_(); public native Attrs labels_(StringVector labels_); public native @StringPiece BytePointer display_name_(); public native Attrs display_name_(BytePointer display_name_); } public TensorSummary(@Const @ByRef Scope scope, @ByVal Input tensor) { super((Pointer)null); allocate(scope, tensor); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor); public TensorSummary(@Const @ByRef Scope scope, @ByVal Input tensor, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, tensor, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Description(@StringPiece BytePointer x); public static native @ByVal Attrs Description(@StringPiece String x); public static native @ByVal Attrs Labels(@Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector x); public static native @ByVal Attrs DisplayName(@StringPiece BytePointer x); public static native @ByVal Attrs DisplayName(@StringPiece String x); public native @ByRef Operation operation(); public native TensorSummary operation(Operation operation); public native @ByRef Output summary(); public native TensorSummary summary(Output summary); } /** Outputs a {@code Summary} protocol buffer with a tensor and per-plugin data. * * Arguments: * * scope: A Scope object * * tag: A string attached to this summary. Used for organization in TensorBoard. * * tensor: A tensor to serialize. * * serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin * data. * * Returns: * * {@code Output}: The summary tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class TensorSummaryV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TensorSummaryV2(Pointer p) { super(p); } public TensorSummaryV2(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @ByVal Input serialized_summary_metadata) { super((Pointer)null); allocate(scope, tag, tensor, serialized_summary_metadata); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tag, @ByVal Input tensor, @ByVal Input serialized_summary_metadata); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native TensorSummaryV2 operation(Operation operation); public native @ByRef Output summary(); public native TensorSummaryV2 summary(Output summary); } /** Provides the time since epoch in seconds. * * Returns the timestamp as a {@code float64} for seconds since the Unix epoch. * * Note: the timestamp is computed when the op is executed, not when it is added * to the graph. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The ts tensor. 
*/ @Namespace("tensorflow::ops") @NoOffset public static class Timestamp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Timestamp(Pointer p) { super(p); } public Timestamp(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Timestamp operation(Operation operation); public native @ByRef Output ts(); public native Timestamp ts(Output ts); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_LOGGING_OPS_H_ // Parsed from tensorflow/cc/ops/math_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_MATH_OPS_H_ // #define TENSORFLOW_CC_OPS_MATH_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup math_ops Math Ops * \{

* Computes the absolute value of a tensor. * * Given a tensor {@code x}, this operation returns a tensor containing the absolute * value of each element in {@code x}. For example, if x is an input element and y is * an output element, this operation computes \\(y = |x|\\). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Abs extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Abs(Pointer p) { super(p); } public Abs(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Abs operation(Operation operation); public native @ByRef Output y(); public native Abs y(Output y); } /** Returns the element-wise sum of a list of tensors. * * {@code tf.accumulate_n_v2} performs the same operation as {@code tf.add_n}, but does not * wait for all of its inputs to be ready before beginning to sum. This can * save memory if inputs are ready at different times, since minimum temporary * storage is proportional to the output size rather than the inputs size. * * Unlike the original {@code accumulate_n}, {@code accumulate_n_v2} is differentiable. * * Returns a {@code Tensor} of same shape and type as the elements of {@code inputs}. * * Arguments: * * scope: A Scope object * * inputs: A list of {@code Tensor} objects, each with same shape and type. * * shape: Shape of elements of {@code inputs}. * * Returns: * * {@code Output}: The sum tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class AccumulateNV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AccumulateNV2(Pointer p) { super(p); } public AccumulateNV2(@Const @ByRef Scope scope, @ByVal InputList inputs, @ByVal PartialTensorShape shape) { super((Pointer)null); allocate(scope, inputs, shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs, @ByVal PartialTensorShape shape); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AccumulateNV2 operation(Operation operation); public native @ByRef Output sum(); public native AccumulateNV2 sum(Output sum); } /** Computes acos of x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Acos extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Acos(Pointer p) { super(p); } public Acos(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Acos operation(Operation operation); public native @ByRef Output y(); public native Acos y(Output y); } /** Computes inverse hyperbolic cosine of x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Acosh extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Acosh(Pointer p) { super(p); } public Acosh(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Acosh operation(Operation operation); public native @ByRef Output y(); public native Acosh y(Output y); } /** Returns x + y element-wise. * * *NOTE*: {@code Add} supports broadcasting. {@code AddN} does not. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Add extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Add(Pointer p) { super(p); } public Add(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Add operation(Operation operation); public native @ByRef Output z(); public native Add z(Output z); } /** Add all input tensors element-wise. * * Arguments: * * scope: A Scope object * * inputs: Must all be the same size and shape. * * Returns: * * {@code Output}: The sum tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class AddN extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AddN(Pointer p) { super(p); } public AddN(@Const @ByRef Scope scope, @ByVal InputList inputs) { super((Pointer)null); allocate(scope, inputs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AddN operation(Operation operation); public native @ByRef Output sum(); public native AddN sum(Output sum); } /** Returns x + y element-wise. * * *NOTE*: {@code Add} supports broadcasting. {@code AddN} does not.
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class AddV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AddV2(Pointer p) { super(p); } public AddV2(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native AddV2 operation(Operation operation); public native @ByRef Output z(); public native AddV2 z(Output z); } /** Computes the "logical and" of elements across dimensions of a tensor. * * Reduces {@code input} along the dimensions given in {@code axis}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * * Arguments: * * scope: A Scope object * * input: The tensor to reduce. * * axis: The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: The reduced tensor. * * Aliases: * * ReduceAll */ @Namespace("tensorflow::ops") @NoOffset public static class All extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public All(Pointer p) { super(p); } /** Optional attribute setters for All */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. 
* * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public All(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis); public All(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native All operation(Operation operation); public native @ByRef Output output(); public native All output(Output output); } /// /// /// /// /// /// /// /** Returns the argument of a complex number. * * Given a tensor {@code input} of complex numbers, this operation returns a tensor of * type {@code float} that is the argument of each element in {@code input}. All elements in * {@code input} must be complex numbers of the form \\(a + bj\\), where *a* * is the real part and *b* is the imaginary part. * * The argument returned by this operation is of the form \\(atan2(b, a)\\). * * For example: * *

 *  {@code
 *  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
 *  tf.angle(input) ==> [2.0132, 1.056]
 *  }
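 *
 *  The equivalent call through this Java binding would be a sketch along these lines,
 *  assuming {@code scope} and a complex-valued {@code Input} {@code input} built elsewhere:
 *  {@code
 *  Angle angle = new Angle(scope, input);  // Tout defaults to DT_FLOAT
 *  Output arg = angle.output();
 *  }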
* * \compatibility(numpy) * Equivalent to np.angle. * \end_compatibility * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Angle extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Angle(Pointer p) { super(p); } /** Optional attribute setters for Angle */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_FLOAT */ public native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int Tout_(); public native Attrs Tout_(int Tout_); } public Angle(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public Angle(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native Angle operation(Operation operation); public native @ByRef Output output(); public native Angle output(Output output); } /** Computes the "logical or" of elements across dimensions of a tensor. * * Reduces {@code input} along the dimensions given in {@code axis}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * * Arguments: * * scope: A Scope object * * input: The tensor to reduce. * * axis: The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: The reduced tensor. * * Aliases: * * ReduceAny */ @Namespace("tensorflow::ops") @NoOffset public static class Any extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Any(Pointer p) { super(p); } /** Optional attribute setters for Any */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public Any(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis); public Any(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Any operation(Operation operation); public native @ByRef Output output(); public native Any output(Output output); } /// /// /** Returns the truth value of abs(x-y) < tolerance element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ApproximateEqual extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApproximateEqual(Pointer p) { super(p); } /** Optional attribute setters for ApproximateEqual */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 1e-05 */ public native @ByVal Attrs Tolerance(float x); public native float tolerance_(); public native Attrs tolerance_(float tolerance_); } public ApproximateEqual(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public ApproximateEqual(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, y, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Tolerance(float x); public native @ByRef Operation operation(); public native ApproximateEqual operation(Operation operation); public native @ByRef Output z(); public native ApproximateEqual z(Output z); } /** Returns the index with the largest value across dimensions of a tensor. * * Note that in case of ties the identity of the return value is not guaranteed. * * Arguments: * * scope: A Scope object * * dimension: int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ArgMax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ArgMax(Pointer p) { super(p); } /** Optional attribute setters for ArgMax */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_INT64 */ public native @ByVal Attrs OutputType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int output_type_(); public native Attrs output_type_(int output_type_); } public ArgMax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension) { super((Pointer)null); allocate(scope, input, dimension); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension); public ArgMax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, dimension, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs OutputType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native ArgMax operation(Operation operation); public native @ByRef Output output(); public native ArgMax output(Output output); } /** Returns the index with the smallest value across dimensions of a tensor. * * Note that in case of ties the identity of the return value is not guaranteed. * * Arguments: * * scope: A Scope object * * dimension: int32 or int64, must be in the range {@code [-rank(input), rank(input))}. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class ArgMin extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ArgMin(Pointer p) { super(p); } /** Optional attribute setters for ArgMin */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_INT64 */ public native @ByVal Attrs OutputType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int output_type_(); public native Attrs output_type_(int output_type_); } public ArgMin(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension) { super((Pointer)null); allocate(scope, input, dimension); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension); public ArgMin(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, dimension, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input dimension, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs OutputType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native ArgMin operation(Operation operation); public native @ByRef Output output(); public native ArgMin output(Output output); } /** Computes asin of x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Asin extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Asin(Pointer p) { super(p); } public Asin(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Asin operation(Operation operation); public native @ByRef Output y(); public native Asin y(Output y); } /** Computes inverse hyperbolic sine of x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Asinh extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Asinh(Pointer p) { super(p); } public Asinh(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Asinh operation(Operation operation); public native @ByRef Output y(); public native Asinh y(Output y); } /** Computes atan of x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Atan extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Atan(Pointer p) { super(p); } public Atan(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Atan operation(Operation operation); public native @ByRef Output y(); public native Atan y(Output y); } /** Computes arctangent of {@code y/x} element-wise, respecting signs of the arguments. * * This is the angle \( \theta \in [-\pi, \pi] \) such that * \[ x = r \cos(\theta) \] * and * \[ y = r \sin(\theta) \] * where \(r = \sqrt{x^2 + y^2} \). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Atan2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Atan2(Pointer p) { super(p); } public Atan2(@Const @ByRef Scope scope, @ByVal Input y, @ByVal Input x) { super((Pointer)null); allocate(scope, y, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input y, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Atan2 operation(Operation operation); public native @ByRef Output z(); public native Atan2 z(Output z); } /** Computes inverse hyperbolic tangent of x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Atanh extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Atanh(Pointer p) { super(p); } public Atanh(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Atanh operation(Operation operation); public native @ByRef Output y(); public native Atanh y(Output y); } /** Multiplies slices of two tensors in batches. * * Multiplies all slices of {@code Tensor} {@code x} and {@code y} (each slice can be * viewed as an element of a batch), and arranges the individual results * in a single output tensor of the same batch size. Each of the * individual slices can optionally be adjointed (to adjoint a matrix * means to transpose and conjugate it) before multiplication by setting * the {@code adj_x} or {@code adj_y} flag to {@code True}, which are by default {@code False}. * * The input tensors {@code x} and {@code y} are 2-D or higher with shape {@code [..., r_x, c_x]} * and {@code [..., r_y, c_y]}. * * The output tensor is 2-D or higher with shape {@code [..., r_o, c_o]}, where: * * r_o = c_x if adj_x else r_x * c_o = r_y if adj_y else c_y * * It is computed as: * * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) * * Arguments: * * scope: A Scope object * * x: 2-D or higher with shape {@code [..., r_x, c_x]}.
* * y: 2-D or higher with shape {@code [..., r_y, c_y]}. * * Optional attributes (see {@code Attrs}): * * adj_x: If {@code True}, adjoint the slices of {@code x}. Defaults to {@code False}. * * adj_y: If {@code True}, adjoint the slices of {@code y}. Defaults to {@code False}. * * Returns: * * {@code Output}: 3-D or higher with shape {@code [..., r_o, c_o]} */ @Namespace("tensorflow::ops") @NoOffset public static class BatchMatMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchMatMul(Pointer p) { super(p); } /** Optional attribute setters for BatchMatMul */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, adjoint the slices of {@code x}. Defaults to {@code False}. * * Defaults to false */ /// public native @ByVal Attrs AdjX(@Cast("bool") boolean x); /** If {@code True}, adjoint the slices of {@code y}. Defaults to {@code False}. * * Defaults to false */ public native @ByVal Attrs AdjY(@Cast("bool") boolean x); public native @Cast("bool") boolean adj_x_(); public native Attrs adj_x_(boolean adj_x_); public native @Cast("bool") boolean adj_y_(); public native Attrs adj_y_(boolean adj_y_); } public BatchMatMul(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public BatchMatMul(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, y, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs AdjX(@Cast("bool") boolean x); public static native @ByVal Attrs AdjY(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native BatchMatMul operation(Operation operation); public native @ByRef Output output(); public native BatchMatMul output(Output output); } /** Computes the Bessel i0e function of {@code x} element-wise. * * Exponentially scaled modified Bessel function of order 0 defined as * {@code bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)}. * * This function is faster and numerically stabler than {@code bessel_i0(x)}. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class BesselI0e extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public BesselI0e(Pointer p) { super(p); } public BesselI0e(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native BesselI0e operation(Operation operation); public native @ByRef Output y(); public native BesselI0e y(Output y); } /** Computes the Bessel i1e function of {@code x} element-wise. * * Exponentially scaled modified Bessel function of order 1 defined as * {@code bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)}. * * This function is faster and numerically stabler than {@code bessel_i1(x)}. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class BesselI1e extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BesselI1e(Pointer p) { super(p); } public BesselI1e(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native BesselI1e operation(Operation operation); public native @ByRef Output y(); public native BesselI1e y(Output y); } /** Compute the regularized incomplete beta integral \\(I_x(a, b)\\). * * The regularized incomplete beta integral is defined as: * * * \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) * * where * * * \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) * * * is the incomplete beta function and \\(B(a, b)\\) is the *complete* * beta function. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Betainc extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Betainc(Pointer p) { super(p); } public Betainc(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @ByVal Input x) { super((Pointer)null); allocate(scope, a, b, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Betainc operation(Operation operation); public native @ByRef Output z(); public native Betainc z(Output z); } /** Counts the number of occurrences of each value in an integer array. * * Outputs a vector with length {@code size} and the same dtype as {@code weights}. If * {@code weights} are empty, then index {@code i} stores the number of times the value {@code i} is * counted in {@code arr}. If {@code weights} are non-empty, then index {@code i} stores the sum of * the value in {@code weights} at each index where the corresponding value in {@code arr} is * {@code i}. * * Values in {@code arr} outside of the range [0, size) are ignored. * * Arguments: * * scope: A Scope object * * arr: int32 {@code Tensor}.
* * size: non-negative int32 scalar {@code Tensor}. * * weights: is an int32, int64, float32, or float64 {@code Tensor} with the same * shape as {@code arr}, or a length-0 {@code Tensor}, in which case it acts as all weights * equal to 1. * * Returns: * * {@code Output}: 1D {@code Tensor} with length equal to {@code size}. The counts or summed weights for * each value in the range [0, size). */ @Namespace("tensorflow::ops") @NoOffset public static class Bincount extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Bincount(Pointer p) { super(p); } public Bincount(@Const @ByRef Scope scope, @ByVal Input arr, @ByVal Input size, @ByVal Input weights) { super((Pointer)null); allocate(scope, arr, size, weights); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input arr, @ByVal Input size, @ByVal Input weights); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Bincount operation(Operation operation); public native @ByRef Output bins(); public native Bincount bins(Output bins); } /** Bucketizes 'input' based on 'boundaries'. * * For example, if the inputs are * boundaries = [0, 10, 100] * input = [[-5, 10000] * [150, 10] * [5, 100]] * * then the output will be * output = [[0, 3] * [3, 2] * [1, 3]] * * Arguments: * * scope: A Scope object * * input: Any shape of Tensor contains with int or float type. * * boundaries: A sorted list of floats gives the boundary of the buckets. * * Returns: * * {@code Output}: Same shape with 'input', each value of input replaced with bucket index. * * \compatibility(numpy) * Equivalent to np.digitize. * \end_compatibility */ @Namespace("tensorflow::ops") @NoOffset public static class Bucketize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Bucketize(Pointer p) { super(p); } public Bucketize(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice FloatPointer boundaries) { super((Pointer)null); allocate(scope, input, boundaries); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice FloatPointer boundaries); public Bucketize(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice FloatBuffer boundaries) { super((Pointer)null); allocate(scope, input, boundaries); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice FloatBuffer boundaries); public Bucketize(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice float... boundaries) { super((Pointer)null); allocate(scope, input, boundaries); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice float... boundaries); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Bucketize operation(Operation operation); public native @ByRef Output output(); public native Bucketize output(Output output); } /** Cast x of type SrcT to y of DstT. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Name("tensorflow::ops::Cast") @NoOffset public static class CastOp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public CastOp(Pointer p) { super(p); } /** Optional attribute setters for Cast */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to false */ public native @ByVal Attrs Truncate(@Cast("bool") boolean x); public native @Cast("bool") boolean Truncate_(); public native Attrs Truncate_(boolean Truncate_); } public CastOp(@Const @ByRef Scope scope, @ByVal Input x, @Cast("tensorflow::DataType") int DstT) { super((Pointer)null); allocate(scope, x, DstT); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Cast("tensorflow::DataType") int DstT); public CastOp(@Const @ByRef Scope scope, @ByVal Input x, @Cast("tensorflow::DataType") int DstT, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, DstT, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Cast("tensorflow::DataType") int DstT, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Truncate(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native CastOp operation(Operation operation); public native @ByRef Output y(); public native CastOp y(Output y); } /** Returns element-wise smallest integer not less than x. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Ceil extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Ceil(Pointer p) { super(p); } public Ceil(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Ceil operation(Operation operation); public native @ByRef Output y(); public native Ceil y(Output y); } /** Clips tensor values to a specified min and max. * * Given a tensor {@code t}, this operation returns a tensor of the same type and * shape as {@code t} with its values clipped to {@code clip_value_min} and {@code clip_value_max}. * Any values less than {@code clip_value_min} are set to {@code clip_value_min}. Any values * greater than {@code clip_value_max} are set to {@code clip_value_max}. * * Arguments: * * scope: A Scope object * * t: A {@code Tensor}. * * clip_value_min: A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape * as {@code t}. The minimum value to clip by. * * clip_value_max: A 0-D (scalar) {@code Tensor}, or a {@code Tensor} with the same shape * as {@code t}. The maximum value to clip by. 
* * Returns: * * {@code Output}: A clipped {@code Tensor} with the same shape as input 't'. */ @Namespace("tensorflow::ops") @NoOffset public static class ClipByValue extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ClipByValue(Pointer p) { super(p); } public ClipByValue(@Const @ByRef Scope scope, @ByVal Input t, @ByVal Input clip_value_min, @ByVal Input clip_value_max) { super((Pointer)null); allocate(scope, t, clip_value_min, clip_value_max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input t, @ByVal Input clip_value_min, @ByVal Input clip_value_max); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ClipByValue operation(Operation operation); public native @ByRef Output output(); public native ClipByValue output(Output output); } /** Compare values of {@code input} to {@code threshold} and pack resulting bits into a {@code uint8}. * * Each comparison returns a boolean {@code true} (if {@code input_value > threshold}) * or and {@code false} otherwise. * * This operation is useful for Locality-Sensitive-Hashing (LSH) and other * algorithms that use hashing approximations of cosine and {@code L2} distances; * codes can be generated from an input via: * *
 *  <pre>{@code python
 *  codebook_size = 50
 *  codebook_bits = codebook_size * 32
 *  codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
 *                             dtype=x.dtype,
 *                             initializer=tf.orthogonal_initializer())
 *  codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
 *  codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
 *  # now codes has shape x.shape[:-1] + [codebook_size]
 *  }</pre>
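 *
 *  A minimal Java sketch of wiring this op through these bindings (illustrative
 *  only; the {@code Placeholder} feeds and variable names are assumptions, not
 *  generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder input = new Placeholder(scope, DT_FLOAT);      // innermost dim divisible by 8
 *  Placeholder threshold = new Placeholder(scope, DT_FLOAT);  // scalar threshold
 *  CompareAndBitpack packed = new CompareAndBitpack(scope, input.asInput(), threshold.asInput());
 *  Output bits = packed.output();  // uint8 tensor shaped [..., s_n / 8]
 *  }</pre>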
 *
 *  **NOTE**: Currently, the innermost dimension of the tensor must be divisible
 *  by 8.
 *
 *  Given an {@code input} shaped {@code [s0, s1, ..., s_n]}, the output is
 *  a {@code uint8} tensor shaped {@code [s0, s1, ..., s_n / 8]}.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: Values to compare against {@code threshold} and bitpack.
 *  * threshold: Threshold to compare against.
 *
 *  Returns:
 *  * {@code Output}: The bitpacked comparisons. */
@Namespace("tensorflow::ops") @NoOffset public static class CompareAndBitpack extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public CompareAndBitpack(Pointer p) { super(p); }

    public CompareAndBitpack(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input threshold) { super((Pointer)null); allocate(scope, input, threshold); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input threshold);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native CompareAndBitpack operation(Operation operation);
    public native @ByRef Output output(); public native CompareAndBitpack output(Output output);
}

/** Converts two real numbers to a complex number.
 *
 *  Given a tensor {@code real} representing the real part of a complex number, and a
 *  tensor {@code imag} representing the imaginary part of a complex number, this
 *  operation returns complex numbers elementwise of the form \\(a + bj\\), where
 *  *a* represents the {@code real} part and *b* represents the {@code imag} part.
 *
 *  The input tensors {@code real} and {@code imag} must have the same shape.
 *
 *  For example:
 *
 *  <pre>{@code
 *  # tensor 'real' is [2.25, 3.25]
 *  # tensor `imag` is [4.75, 5.75]
 *  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
 *  }</pre>
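 *
 *  A minimal Java sketch against these bindings (illustrative; the placeholder
 *  wiring and names are assumptions, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder real = new Placeholder(scope, DT_DOUBLE);
 *  Placeholder imag = new Placeholder(scope, DT_DOUBLE);
 *  // Request a complex128 result via the optional Tout attribute;
 *  // float64 inputs pair with DT_COMPLEX128, float32 with the default DT_COMPLEX64.
 *  Complex z = new Complex(scope, real.asInput(), imag.asInput(), Complex.Tout(DT_COMPLEX128));
 *  Output out = z.out();
 *  }</pre>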
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The out tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Complex extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Complex(Pointer p) { super(p); }

    /** Optional attribute setters for Complex */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to DT_COMPLEX64 */
        public native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x);

        public native @Cast("tensorflow::DataType") int Tout_(); public native Attrs Tout_(int Tout_);
    }

    public Complex(@Const @ByRef Scope scope, @ByVal Input real, @ByVal Input imag) { super((Pointer)null); allocate(scope, real, imag); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input real, @ByVal Input imag);
    public Complex(@Const @ByRef Scope scope, @ByVal Input real, @ByVal Input imag, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, real, imag, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input real, @ByVal Input imag, @Const @ByRef Attrs attrs);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x);

    public native @ByRef Operation operation(); public native Complex operation(Operation operation);
    public native @ByRef Output out(); public native Complex out(Output out);
}

/** Computes the complex absolute value of a tensor.
 *
 *  Given a tensor {@code x} of complex numbers, this operation returns a tensor of type
 *  {@code float} or {@code double} that is the absolute value of each element in {@code x}. All
 *  elements in {@code x} must be complex numbers of the form \\(a + bj\\). The absolute
 *  value is computed as \\( \sqrt{a^2 + b^2}\\).
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class ComplexAbs extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ComplexAbs(Pointer p) { super(p); }

    /** Optional attribute setters for ComplexAbs */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to DT_FLOAT */
        public native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x);

        public native @Cast("tensorflow::DataType") int Tout_(); public native Attrs Tout_(int Tout_);
    }

    public ComplexAbs(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public ComplexAbs(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x);

    public native @ByRef Operation operation(); public native ComplexAbs operation(Operation operation);
    public native @ByRef Output y(); public native ComplexAbs y(Output y);
}

/** Returns the complex conjugate of a complex number.
 *
 *  Given a tensor {@code input} of complex numbers, this operation returns a tensor of
 *  complex numbers that are the complex conjugate of each element in {@code input}. The
 *  complex numbers in {@code input} must be of the form \\(a + bj\\), where *a* is the
 *  real part and *b* is the imaginary part.
 *
 *  The complex conjugate returned by this operation is of the form \\(a - bj\\).
 *
 *  For example:
 *
 *  <pre>{@code
 *  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
 *  tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
 *  }</pre>
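 *
 *  A minimal Java sketch against these bindings (illustrative; the complex-typed
 *  placeholder is an assumption, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder input = new Placeholder(scope, DT_COMPLEX64);
 *  Conj conj = new Conj(scope, input.asInput());
 *  Output out = conj.output();  // elementwise a - bj
 *  }</pre>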
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Conj extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Conj(Pointer p) { super(p); }

    public Conj(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Conj operation(Operation operation);
    public native @ByRef Output output(); public native Conj output(Output output);
}

/** Computes cos of x element-wise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Cos extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Cos(Pointer p) { super(p); }

    public Cos(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Cos operation(Operation operation);
    public native @ByRef Output y(); public native Cos y(Output y);
}

/** Computes hyperbolic cosine of x element-wise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Cosh extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Cosh(Pointer p) { super(p); }

    public Cosh(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Cosh operation(Operation operation);
    public native @ByRef Output y(); public native Cosh y(Output y);
}

/** Compute the pairwise cross product.
 *
 *  {@code a} and {@code b} must be the same shape; they can either be simple 3-element vectors,
 *  or any shape where the innermost dimension is 3. In the latter case, each pair
 *  of corresponding 3-element vectors is cross-multiplied independently.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * a: A tensor containing 3-element vectors.
 *  * b: Another tensor, of same type and shape as {@code a}.
 *
 *  Returns:
 *  * {@code Output}: Pairwise cross product of the vectors in {@code a} and {@code b}. */
@Namespace("tensorflow::ops") @NoOffset public static class Cross extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Cross(Pointer p) { super(p); }

    public Cross(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b) { super((Pointer)null); allocate(scope, a, b); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Cross operation(Operation operation);
    public native @ByRef Output product(); public native Cross product(Output product);
}

/** Compute the cumulative product of the tensor {@code x} along {@code axis}.
 *
 *  By default, this op performs an inclusive cumprod, which means that the first
 *  element of the input is identical to the first element of the output:
 *
 *  <pre>{@code python
 *  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
 *  }</pre>
 *
 *  By setting the {@code exclusive} kwarg to {@code True}, an exclusive cumprod is
 *  performed instead:
 *
 *  <pre>{@code python
 *  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
 *  }</pre>
 *
 *  By setting the {@code reverse} kwarg to {@code True}, the cumprod is performed in the
 *  opposite direction:
 *
 *  <pre>{@code python
 *  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
 *  }</pre>
 *
 *  This is more efficient than using separate {@code tf.reverse} ops.
 *
 *  The {@code reverse} and {@code exclusive} kwargs can also be combined:
 *
 *  <pre>{@code python
 *  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
 *  }</pre>
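 *
 *  A minimal Java sketch against these bindings (illustrative; the placeholder
 *  wiring is an assumption, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder x = new Placeholder(scope, DT_FLOAT);
 *  Placeholder axis = new Placeholder(scope, DT_INT32);  // scalar axis, e.g. 0
 *  Cumprod cp = new Cumprod(scope, x.asInput(), axis.asInput(),
 *                           Cumprod.Exclusive(true).Reverse(true));
 *  Output out = cp.out();
 *  }</pre>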
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * x: A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64},
 *  {@code int64}, {@code int32}, {@code uint8}, {@code uint16}, {@code int16}, {@code int8}, {@code complex64},
 *  {@code complex128}, {@code qint8}, {@code quint8}, {@code qint32}, {@code half}.
 *  * axis: A {@code Tensor} of type {@code int32} (default: 0). Must be in the range
 *  {@code [-rank(x), rank(x))}.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * exclusive: If {@code True}, perform exclusive cumprod.
 *  * reverse: A {@code bool} (default: False).
 *
 *  Returns:
 *  * {@code Output}: The out tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Cumprod extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Cumprod(Pointer p) { super(p); }

    /** Optional attribute setters for Cumprod */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If {@code True}, perform exclusive cumprod.
         *
         *  Defaults to false */
        ///
        public native @ByVal Attrs Exclusive(@Cast("bool") boolean x);

        /** A {@code bool} (default: False).
         *
         *  Defaults to false */
        public native @ByVal Attrs Reverse(@Cast("bool") boolean x);

        public native @Cast("bool") boolean exclusive_(); public native Attrs exclusive_(boolean exclusive_);
        public native @Cast("bool") boolean reverse_(); public native Attrs reverse_(boolean reverse_);
    }

    public Cumprod(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis) { super((Pointer)null); allocate(scope, x, axis); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis);
    public Cumprod(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, axis, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis, @Const @ByRef Attrs attrs);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Exclusive(@Cast("bool") boolean x);
    public static native @ByVal Attrs Reverse(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native Cumprod operation(Operation operation);
    public native @ByRef Output out(); public native Cumprod out(Output out);
}

/** Compute the cumulative sum of the tensor {@code x} along {@code axis}.
 *
 *  By default, this op performs an inclusive cumsum, which means that the first
 *  element of the input is identical to the first element of the output:
 *
 *  <pre>{@code python
 *  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
 *  }</pre>
 *
 *  By setting the {@code exclusive} kwarg to {@code True}, an exclusive cumsum is
 *  performed instead:
 *
 *  <pre>{@code python
 *  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
 *  }</pre>
 *
 *  By setting the {@code reverse} kwarg to {@code True}, the cumsum is performed in the
 *  opposite direction:
 *
 *  <pre>{@code python
 *  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
 *  }</pre>
 *
 *  This is more efficient than using separate {@code tf.reverse} ops.
 *
 *  The {@code reverse} and {@code exclusive} kwargs can also be combined:
 *
 *  <pre>{@code python
 *  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
 *  }</pre>
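 *
 *  A minimal Java sketch against these bindings (illustrative; the placeholder
 *  wiring is an assumption, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder x = new Placeholder(scope, DT_FLOAT);
 *  Placeholder axis = new Placeholder(scope, DT_INT32);  // scalar axis
 *  Cumsum cs = new Cumsum(scope, x.asInput(), axis.asInput(), Cumsum.Exclusive(true));
 *  Output out = cs.out();
 *  }</pre>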
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * x: A {@code Tensor}. Must be one of the following types: {@code float32}, {@code float64},
 *  {@code int64}, {@code int32}, {@code uint8}, {@code uint16}, {@code int16}, {@code int8}, {@code complex64},
 *  {@code complex128}, {@code qint8}, {@code quint8}, {@code qint32}, {@code half}.
 *  * axis: A {@code Tensor} of type {@code int32} (default: 0). Must be in the range
 *  {@code [-rank(x), rank(x))}.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * exclusive: If {@code True}, perform exclusive cumsum.
 *  * reverse: A {@code bool} (default: False).
 *
 *  Returns:
 *  * {@code Output}: The out tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Cumsum extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Cumsum(Pointer p) { super(p); }

    /** Optional attribute setters for Cumsum */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If {@code True}, perform exclusive cumsum.
         *
         *  Defaults to false */
        ///
        public native @ByVal Attrs Exclusive(@Cast("bool") boolean x);

        /** A {@code bool} (default: False).
         *
         *  Defaults to false */
        public native @ByVal Attrs Reverse(@Cast("bool") boolean x);

        public native @Cast("bool") boolean exclusive_(); public native Attrs exclusive_(boolean exclusive_);
        public native @Cast("bool") boolean reverse_(); public native Attrs reverse_(boolean reverse_);
    }

    public Cumsum(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis) { super((Pointer)null); allocate(scope, x, axis); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis);
    public Cumsum(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, axis, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input axis, @Const @ByRef Attrs attrs);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Exclusive(@Cast("bool") boolean x);
    public static native @ByVal Attrs Reverse(@Cast("bool") boolean x);

    public native @ByRef Operation operation(); public native Cumsum operation(Operation operation);
    public native @ByRef Output out(); public native Cumsum out(Output out);
}

/** Computes Psi, the derivative of Lgamma (the log of the absolute value of
 *
 *  {@code Gamma(x)}), element-wise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Digamma extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Digamma(Pointer p) { super(p); }

    public Digamma(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Digamma operation(Operation operation);
    public native @ByRef Output y(); public native Digamma y(Output y);
}

/** Returns x / y element-wise.
 *
 *  *NOTE*: {@code Div} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Div extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Div(Pointer p) { super(p); }

    public Div(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Div operation(Operation operation);
    public native @ByRef Output z(); public native Div z(Output z);
}

/** Returns 0 if the denominator is zero.
 *
 *  *NOTE*: {@code DivNoNan} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class DivNoNan extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public DivNoNan(Pointer p) { super(p); }

    public DivNoNan(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native DivNoNan operation(Operation operation);
    public native @ByRef Output z(); public native DivNoNan z(Output z);
}

/** Returns the truth value of (x == y) element-wise.
 *
 *  *NOTE*: {@code Equal} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Equal extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Equal(Pointer p) { super(p); }

    public Equal(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Equal operation(Operation operation);
    public native @ByRef Output z(); public native Equal z(Output z);
}

/** Computes the Gauss error function of {@code x} element-wise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Erf extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Erf(Pointer p) { super(p); }

    public Erf(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Erf operation(Operation operation);
    public native @ByRef Output y(); public native Erf y(Output y);
}

/** Computes the complementary error function of {@code x} element-wise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Erfc extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Erfc(Pointer p) { super(p); }

    public Erfc(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Erfc operation(Operation operation);
    public native @ByRef Output y(); public native Erfc y(Output y);
}

/** Computes exponential of x element-wise. \\(y = e^x\\).
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Exp extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Exp(Pointer p) { super(p); }

    public Exp(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Exp operation(Operation operation);
    public native @ByRef Output y(); public native Exp y(Output y);
}

/** Computes exponential of x - 1 element-wise.
 *
 *  I.e., \\(y = (\exp x) - 1\\).
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor.
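 *
 *  A minimal Java sketch of the unary-op pattern shared by the element-wise math
 *  classes above (illustrative; the placeholder feed is an assumption, not
 *  generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder x = new Placeholder(scope, DT_FLOAT);
 *  Expm1 op = new Expm1(scope, x.asInput());
 *  Output y = op.y();  // accurate for small x, where exp(x) - 1 would lose precision
 *  }</pre>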
 */
@Namespace("tensorflow::ops") @NoOffset public static class Expm1 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Expm1(Pointer p) { super(p); }

    public Expm1(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Expm1 operation(Operation operation);
    public native @ByRef Output y(); public native Expm1 y(Output y);
}

/** Returns element-wise largest integer not greater than x.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Floor extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Floor(Pointer p) { super(p); }

    public Floor(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Floor operation(Operation operation);
    public native @ByRef Output y(); public native Floor y(Output y);
}

/** Returns x // y element-wise.
 *
 *  *NOTE*: {@code FloorDiv} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class FloorDiv extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FloorDiv(Pointer p) { super(p); }

    public FloorDiv(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native FloorDiv operation(Operation operation);
    public native @ByRef Output z(); public native FloorDiv z(Output z);
}

/** Returns element-wise remainder of division. When {@code x < 0} xor {@code y < 0} is
 *
 *  true, this follows Python semantics in that the result here is consistent
 *  with a flooring divide. E.g. {@code floor(x / y) * y + mod(x, y) = x}.
 *
 *  *NOTE*: {@code FloorMod} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class FloorMod extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FloorMod(Pointer p) { super(p); }

    public FloorMod(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native FloorMod operation(Operation operation);
    public native @ByRef Output z(); public native FloorMod z(Output z);
}

/** Returns the truth value of (x > y) element-wise.
 *
 *  *NOTE*: {@code Greater} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Greater extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Greater(Pointer p) { super(p); }

    public Greater(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Greater operation(Operation operation);
    public native @ByRef Output z(); public native Greater z(Output z);
}

/** Returns the truth value of (x >= y) element-wise.
 *
 *  *NOTE*: {@code GreaterEqual} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class GreaterEqual extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public GreaterEqual(Pointer p) { super(p); }

    public GreaterEqual(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native GreaterEqual operation(Operation operation);
    public native @ByRef Output z(); public native GreaterEqual z(Output z);
}

/** Return histogram of values.
 *
 *  Given the tensor {@code values}, this operation returns a rank 1 histogram counting
 *  the number of entries in {@code values} that fall into every bin. The bins are
 *  equal width and determined by the arguments {@code value_range} and {@code nbins}.
 *
 *  <pre>{@code python
 *  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
 *  nbins = 5
 *  value_range = [0.0, 5.0]
 *  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
 * 
 *  with tf.get_default_session() as sess:
 *    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
 *    variables.global_variables_initializer().run()
 *    sess.run(hist) => [2, 1, 1, 0, 2]
 *  }</pre>
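 *
 *  A minimal Java sketch against these bindings (illustrative; the placeholder
 *  wiring and the int64 output dtype are assumptions, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder values = new Placeholder(scope, DT_FLOAT);
 *  Placeholder valueRange = new Placeholder(scope, DT_FLOAT);  // shape [2]: lower and upper bound
 *  Placeholder nbins = new Placeholder(scope, DT_INT32);       // scalar bin count
 *  HistogramFixedWidth hist = new HistogramFixedWidth(scope, values.asInput(),
 *          valueRange.asInput(), nbins.asInput(), HistogramFixedWidth.Dtype(DT_INT64));
 *  Output out = hist.out();  // 1-D histogram counts
 *  }</pre>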
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * values: Numeric {@code Tensor}.
 *  * value_range: Shape [2] {@code Tensor} of same {@code dtype} as {@code values}.
 *  values <= value_range[0] will be mapped to hist[0],
 *  values >= value_range[1] will be mapped to hist[-1].
 *  * nbins: Scalar {@code int32 Tensor}. Number of histogram bins.
 *
 *  Returns:
 *  * {@code Output}: A 1-D {@code Tensor} holding histogram of values. */
@Namespace("tensorflow::ops") @NoOffset public static class HistogramFixedWidth extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public HistogramFixedWidth(Pointer p) { super(p); }

    /** Optional attribute setters for HistogramFixedWidth */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to DT_INT32 */
        public native @ByVal Attrs Dtype(@Cast("tensorflow::DataType") int x);

        public native @Cast("tensorflow::DataType") int dtype_(); public native Attrs dtype_(int dtype_);
    }

    public HistogramFixedWidth(@Const @ByRef Scope scope, @ByVal Input values, @ByVal Input value_range, @ByVal Input nbins) { super((Pointer)null); allocate(scope, values, value_range, nbins); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input values, @ByVal Input value_range, @ByVal Input nbins);
    public HistogramFixedWidth(@Const @ByRef Scope scope, @ByVal Input values, @ByVal Input value_range, @ByVal Input nbins, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, values, value_range, nbins, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input values, @ByVal Input value_range, @ByVal Input nbins, @Const @ByRef Attrs attrs);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Dtype(@Cast("tensorflow::DataType") int x);

    public native @ByRef Operation operation(); public native HistogramFixedWidth operation(Operation operation);
    public native @ByRef Output out(); public native HistogramFixedWidth out(Output out);
}

/** Compute the lower regularized incomplete Gamma function {@code P(a, x)}.
 *
 *  The lower regularized incomplete Gamma function is defined as:
 *
 *  \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
 *
 *  where
 *
 *  \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
 *
 *  is the lower incomplete Gamma function.
 *
 *  Note, above {@code Q(a, x)} ({@code Igammac}) is the upper regularized complete
 *  Gamma function.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Igamma extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Igamma(Pointer p) { super(p); }

    public Igamma(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input x) { super((Pointer)null); allocate(scope, a, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Igamma operation(Operation operation);
    public native @ByRef Output z(); public native Igamma z(Output z);
}

/** Compute the upper regularized incomplete Gamma function {@code Q(a, x)}.
 *
 *  The upper regularized incomplete Gamma function is defined as:
 *
 *  \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
 *
 *  where
 *
 *  \\(Gamma(a, x) = \\int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
 *
 *  is the upper incomplete Gamma function.
 *
 *  Note, above {@code P(a, x)} ({@code Igamma}) is the lower regularized complete
 *  Gamma function.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Igammac extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Igammac(Pointer p) { super(p); }

    public Igammac(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input x) { super((Pointer)null); allocate(scope, a, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Igammac operation(Operation operation);
    public native @ByRef Output z(); public native Igammac z(Output z);
}

/** Returns the imaginary part of a complex number.
 *
 *  Given a tensor {@code input} of complex numbers, this operation returns a tensor of
 *  type {@code float} that is the imaginary part of each element in {@code input}. All
 *  elements in {@code input} must be complex numbers of the form \\(a + bj\\), where *a*
 *  is the real part and *b* is the imaginary part returned by this operation.
 *
 *  For example:
 *
 *  <pre>{@code
 *  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
 *  tf.imag(input) ==> [4.75, 5.75]
 *  }</pre>
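 *
 *  A minimal Java sketch against these bindings (illustrative; the complex-typed
 *  placeholder is an assumption, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder input = new Placeholder(scope, DT_COMPLEX64);
 *  Imag im = new Imag(scope, input.asInput());  // Tout defaults to DT_FLOAT
 *  Output out = im.output();
 *  }</pre>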
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Imag extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Imag(Pointer p) { super(p); }

    /** Optional attribute setters for Imag */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to DT_FLOAT */
        public native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x);

        public native @Cast("tensorflow::DataType") int Tout_(); public native Attrs Tout_(int Tout_);
    }

    public Imag(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input);
    public Imag(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public static native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x);

    public native @ByRef Operation operation(); public native Imag operation(Operation operation);
    public native @ByRef Output output(); public native Imag output(Output output);
}

/** Computes the reciprocal of x element-wise.
 *
 *  I.e., \\(y = 1 / x\\).
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Inv extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Inv(Pointer p) { super(p); }

    public Inv(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Inv operation(Operation operation);
    public native @ByRef Output y(); public native Inv y(Output y);
}

/** Returns which elements of x are finite.
 *
 *  \compatibility(numpy)
 *  Equivalent to np.isfinite
 *  \end_compatibility
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class IsFinite extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IsFinite(Pointer p) { super(p); }

    public IsFinite(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native IsFinite operation(Operation operation);
    public native @ByRef Output y(); public native IsFinite y(Output y);
}

/** Returns which elements of x are Inf.
 *
 *  \compatibility(numpy)
 *  Equivalent to np.isinf
 *  \end_compatibility
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class IsInf extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IsInf(Pointer p) { super(p); }

    public IsInf(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native IsInf operation(Operation operation);
    public native @ByRef Output y(); public native IsInf y(Output y);
}

/** Returns which elements of x are NaN.
 *
 *  \compatibility(numpy)
 *  Equivalent to np.isnan
 *  \end_compatibility
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class IsNan extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public IsNan(Pointer p) { super(p); }

    public IsNan(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native IsNan operation(Operation operation);
    public native @ByRef Output y(); public native IsNan y(Output y);
}

/** Returns the truth value of (x < y) element-wise.
 *
 *  *NOTE*: {@code Less} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Less extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Less(Pointer p) { super(p); }

    public Less(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Less operation(Operation operation);
    public native @ByRef Output z(); public native Less z(Output z);
}

/** Returns the truth value of (x <= y) element-wise.
 *
 *  *NOTE*: {@code LessEqual} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class LessEqual extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public LessEqual(Pointer p) { super(p); }

    public LessEqual(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native LessEqual operation(Operation operation);
    public native @ByRef Output z(); public native LessEqual z(Output z);
}

/** Computes the log of the absolute value of {@code Gamma(x)} element-wise.
 *
 *  Arguments:
 *  * scope: A Scope object
 *
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Lgamma extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Lgamma(Pointer p) { super(p); }

    public Lgamma(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);

    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native Lgamma operation(Operation operation);
    public native @ByRef Output y(); public native Lgamma y(Output y);
}

/** Generates values in an interval.
 *
 *  A sequence of {@code num} evenly-spaced values are generated beginning at {@code start}.
 *  If {@code num > 1}, the values in the sequence increase by
 *  {@code (stop - start) / (num - 1)}, so that the last one is exactly {@code stop}.
 *
 *  For example:
 *
 *  <pre>{@code
 *  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
 *  }</pre>
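 *
 *  A minimal Java sketch against these bindings (illustrative; the placeholder
 *  wiring is an assumption, not generated code):
 *
 *  <pre>{@code java
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder start = new Placeholder(scope, DT_FLOAT);  // e.g. 10.0
 *  Placeholder stop = new Placeholder(scope, DT_FLOAT);   // e.g. 12.0
 *  Placeholder num = new Placeholder(scope, DT_INT32);    // e.g. 3
 *  LinSpace ls = new LinSpace(scope, start.asInput(), stop.asInput(), num.asInput());
 *  Output out = ls.output();  // 1-D tensor of num evenly-spaced values
 *  }</pre>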
* * Arguments: * * scope: A Scope object * * start: 0-D tensor. First entry in the range. * * stop: 0-D tensor. Last entry in the range. * * num: 0-D tensor. Number of values to generate. * * Returns: * * {@code Output}: 1-D. The generated values. */ @Namespace("tensorflow::ops") @NoOffset public static class LinSpace extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LinSpace(Pointer p) { super(p); } public LinSpace(@Const @ByRef Scope scope, @ByVal Input start, @ByVal Input stop, @ByVal Input num) { super((Pointer)null); allocate(scope, start, stop, num); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input start, @ByVal Input stop, @ByVal Input num); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native LinSpace operation(Operation operation); public native @ByRef Output output(); public native LinSpace output(Output output); } /** Computes natural logarithm of x element-wise. * * I.e., \\(y = \log_e x\\). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Log extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Log(Pointer p) { super(p); } public Log(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Log operation(Operation operation); public native @ByRef Output y(); public native Log y(Output y); } /** Computes natural logarithm of (1 + x) element-wise. * * I.e., \\(y = \log_e (1 + x)\\). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Log1p extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Log1p(Pointer p) { super(p); } public Log1p(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Log1p operation(Operation operation); public native @ByRef Output y(); public native Log1p y(Output y); } /** Returns the truth value of x AND y element-wise. * * *NOTE*: {@code LogicalAnd} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class LogicalAnd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LogicalAnd(Pointer p) { super(p); } public LogicalAnd(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native LogicalAnd operation(Operation operation); public native @ByRef Output z(); public native LogicalAnd z(Output z); } /** Returns the truth value of NOT x element-wise. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class LogicalNot extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogicalNot(Pointer p) { super(p); } public LogicalNot(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native LogicalNot operation(Operation operation); public native @ByRef Output y(); public native LogicalNot y(Output y); } /** Returns the truth value of x OR y element-wise. * * *NOTE*: {@code LogicalOr} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class LogicalOr extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogicalOr(Pointer p) { super(p); } public LogicalOr(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native LogicalOr operation(Operation operation); public native @ByRef Output z(); public native LogicalOr z(Output z); } /** Multiply the matrix "a" by the matrix "b". * * The inputs must be two-dimensional matrices and the inner dimension of * "a" (after being transposed if transpose_a is true) must match the * outer dimension of "b" (after being transposed if transpose_b is * true). * * *Note*: The default kernel implementation for MatMul on GPUs uses * cuBLAS. * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * transpose_a: If true, "a" is transposed before multiplication. * * transpose_b: If true, "b" is transposed before multiplication. * * Returns: * * {@code Output}: The product tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MatMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MatMul(Pointer p) { super(p); } /** Optional attribute setters for MatMul */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, "a" is transposed before multiplication. * * Defaults to false */ /// public native @ByVal Attrs TransposeA(@Cast("bool") boolean x); /** If true, "b" is transposed before multiplication. * * Defaults to false */ public native @ByVal Attrs TransposeB(@Cast("bool") boolean x); public native @Cast("bool") boolean transpose_a_(); public native Attrs transpose_a_(boolean transpose_a_); public native @Cast("bool") boolean transpose_b_(); public native Attrs transpose_b_(boolean transpose_b_); } public MatMul(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b) { super((Pointer)null); allocate(scope, a, b); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b); public MatMul(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, a, b, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs TransposeA(@Cast("bool") boolean x); public static native @ByVal Attrs TransposeB(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native MatMul operation(Operation operation); public native @ByRef Output product(); public native MatMul product(Output product); } /** Computes the maximum of elements across dimensions of a tensor. * * Reduces {@code input} along the dimensions given in {@code axis}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * * Arguments: * * scope: A Scope object * * input: The tensor to reduce. * * axis: The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: The reduced tensor. * * Aliases: * * ReduceMax */ @Namespace("tensorflow::ops") @NoOffset public static class Max extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Max(Pointer p) { super(p); } /** Optional attribute setters for Max */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public Max(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis); public Max(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Max operation(Operation operation); public native @ByRef Output output(); public native Max output(Output output); } /// /// /// /** Returns the max of x and y (i.e. x > y ? x : y) element-wise. * * *NOTE*: {@code Maximum} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Maximum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Maximum(Pointer p) { super(p); } public Maximum(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Maximum operation(Operation operation); public native @ByRef Output z(); public native Maximum z(Output z); } /** Computes the mean of elements across dimensions of a tensor. * * Reduces {@code input} along the dimensions given in {@code axis}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * * Arguments: * * scope: A Scope object * * input: The tensor to reduce. * * axis: The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: The reduced tensor. * * Aliases: * * ReduceMean */ @Namespace("tensorflow::ops") @NoOffset public static class Mean extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Mean(Pointer p) { super(p); } /** Optional attribute setters for Mean */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. 
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public Mean(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis); public Mean(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Mean operation(Operation operation); public native @ByRef Output output(); public native Mean output(Output output); } /// /// /// /// /// /** Computes the minimum of elements across dimensions of a tensor. * * Reduces {@code input} along the dimensions given in {@code axis}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * * Arguments: * * scope: A Scope object * * input: The tensor to reduce. * * axis: The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: The reduced tensor. * * Aliases: * * ReduceMin */ @Namespace("tensorflow::ops") @NoOffset public static class Min extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Min(Pointer p) { super(p); } /** Optional attribute setters for Min */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. 
* * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public Min(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis); public Min(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Min operation(Operation operation); public native @ByRef Output output(); public native Min output(Output output); } /// /// /// /** Returns the min of x and y (i.e. x < y ? x : y) element-wise. * * *NOTE*: {@code Minimum} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Minimum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Minimum(Pointer p) { super(p); } public Minimum(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Minimum operation(Operation operation); public native @ByRef Output z(); public native Minimum z(Output z); } /** Returns element-wise remainder of division. This emulates C semantics in that * * the result here is consistent with a truncating divide. E.g. * {@code tf.truncatediv(x, y) * y + truncate_mod(x, y) = x}. * * *NOTE*: {@code Mod} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Mod extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Mod(Pointer p) { super(p); } public Mod(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Mod operation(Operation operation); public native @ByRef Output z(); public native Mod z(Output z); } /** Returns x * y element-wise. * * *NOTE*: {@code Multiply} supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. * * Aliases: * * Mul */ @Namespace("tensorflow::ops") @NoOffset public static class Multiply extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Multiply(Pointer p) { super(p); } public Multiply(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Multiply operation(Operation operation); public native @ByRef Output z(); public native Multiply z(Output z); } /// /// /// /// /** Computes numerical negative value element-wise. * * I.e., \\(y = -x\\). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. * * Aliases: * * Neg */ @Namespace("tensorflow::ops") @NoOffset public static class Negate extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Negate(Pointer p) { super(p); } public Negate(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Negate operation(Operation operation); public native @ByRef Output y(); public native Negate y(Output y); } /// /// /// /** Returns the truth value of (x != y) element-wise. * * *NOTE*: {@code NotEqual} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class NotEqual extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NotEqual(Pointer p) { super(p); } public NotEqual(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native NotEqual operation(Operation operation); public native @ByRef Output z(); public native NotEqual z(Output z); } /** Compute the polygamma function \\(\psi^{(n)}(x)\\). * * The polygamma function is defined as: * * * \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\) * * where \\(\psi(x)\\) is the digamma function. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Polygamma extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Polygamma(Pointer p) { super(p); } public Polygamma(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input x) { super((Pointer)null); allocate(scope, a, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Polygamma operation(Operation operation); public native @ByRef Output z(); public native Polygamma z(Output z); } /** Computes the power of one value to another. * * Given a tensor {@code x} and a tensor {@code y}, this operation computes \\(x^y\\) for * corresponding elements in {@code x} and {@code y}. For example: * *
 *  <pre>{@code
 *  # tensor 'x' is [[2, 2], [3, 3]]
 *  # tensor 'y' is [[8, 16], [2, 3]]
 *  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
 *  }</pre>
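 * 
 *  A hypothetical Java sketch using this wrapper; {@code scope}, {@code x} and
 *  {@code y} are assumed to be built elsewhere:
 * 
 *  <pre>{@code
 *  Pow pow = new Pow(scope, x, y);
 *  Output z = pow.z();       // elementwise x^y
 *  // pow.asOutput() / pow.asInput() let the result feed further ops
 *  }</pre>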
* * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Pow extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Pow(Pointer p) { super(p); } public Pow(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Pow operation(Operation operation); public native @ByRef Output z(); public native Pow z(Output z); } /** Computes the product of elements across dimensions of a tensor. * * Reduces {@code input} along the dimensions given in {@code axis}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code axis}. If {@code keep_dims} is true, the reduced dimensions are * retained with length 1. * * Arguments: * * scope: A Scope object * * input: The tensor to reduce. * * axis: The dimensions to reduce. Must be in the range * {@code [-rank(input), rank(input))}. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: The reduced tensor. * * Aliases: * * ReduceProd */ @Namespace("tensorflow::ops") @NoOffset public static class Prod extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Prod(Pointer p) { super(p); } /** Optional attribute setters for Prod */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. 
* * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public Prod(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis); public Prod(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Prod operation(Operation operation); public native @ByRef Output output(); public native Prod output(Output output); } /// /// /// /// /// /// /** Convert the quantized 'input' tensor into a lower-precision 'output', using the * * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and * none higher than 49,152. That means only half the range is actually needed, all * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * * Arguments: * * scope: A Scope object * * input_min: The float value that the minimum quantized input value represents. * * input_max: The float value that the maximum quantized input value represents. * * out_type: The type of the output. Should be a lower bit depth than Tinput. * * Returns: * * {@code Output} output * * {@code Output} output_min: The float value that the minimum quantized output value represents. * * {@code Output} output_max: The float value that the maximum quantized output value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizeDownAndShrinkRange extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public QuantizeDownAndShrinkRange(Pointer p) { super(p); } public QuantizeDownAndShrinkRange(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @Cast("tensorflow::DataType") int out_type) { super((Pointer)null); allocate(scope, input, input_min, input_max, out_type); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @Cast("tensorflow::DataType") int out_type); public native @ByRef Operation operation(); public native QuantizeDownAndShrinkRange operation(Operation operation); public native @ByRef Output output(); public native QuantizeDownAndShrinkRange output(Output output); public native @ByRef Output output_min(); public native QuantizeDownAndShrinkRange output_min(Output output_min); public native @ByRef Output output_max(); public native QuantizeDownAndShrinkRange output_max(Output output_max); } /** Returns x + y element-wise, working on quantized buffers. * * Arguments: * * scope: A Scope object * * min_x: The float value that the lowest quantized {@code x} value represents. * * max_x: The float value that the highest quantized {@code x} value represents. * * min_y: The float value that the lowest quantized {@code y} value represents. * * max_y: The float value that the highest quantized {@code y} value represents. * * Returns: * * {@code Output} z * * {@code Output} min_z: The float value that the lowest quantized output value represents. * * {@code Output} max_z: The float value that the highest quantized output value represents. * * *NOTE*: {@code QuantizedAdd} supports limited forms of broadcasting. More about * broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedAdd(Pointer p) { super(p); } /** Optional attribute setters for QuantizedAdd */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QINT32 */ public native @ByVal Attrs Toutput(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int Toutput_(); public native Attrs Toutput_(int Toutput_); } public QuantizedAdd(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y) { super((Pointer)null); allocate(scope, x, y, min_x, max_x, min_y, max_y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y); public QuantizedAdd(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, y, min_x, max_x, min_y, max_y, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Toutput(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native QuantizedAdd operation(Operation operation); public native @ByRef Output z(); public native QuantizedAdd z(Output z); public native @ByRef Output min_z(); public native QuantizedAdd min_z(Output min_z); public native @ByRef Output max_z(); public native QuantizedAdd max_z(Output max_z); } /** Perform a quantized matrix multiplication of {@code a} by the matrix {@code b}. * * The inputs must be two-dimensional matrices and the inner dimension of * {@code a} (after being transposed if {@code transpose_a} is non-zero) must match the * outer dimension of {@code b} (after being transposed if {@code transpose_b} is * non-zero). * * Arguments: * * scope: A Scope object * * a: Must be a two-dimensional tensor. * * b: Must be a two-dimensional tensor. * * min_a: The float value that the lowest quantized {@code a} value represents. * * max_a: The float value that the highest quantized {@code a} value represents. * * min_b: The float value that the lowest quantized {@code b} value represents. * * max_b: The float value that the highest quantized {@code b} value represents. * * Optional attributes (see {@code Attrs}): * * transpose_a: If true, {@code a} is transposed before multiplication. * * transpose_b: If true, {@code b} is transposed before multiplication. * * Tactivation: The type of output produced by activation function * following this operation. * * Returns: * * {@code Output} out * * {@code Output} min_out: The float value that the lowest quantized output value represents. * * {@code Output} max_out: The float value that the highest quantized output value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedMatMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedMatMul(Pointer p) { super(p); } /** Optional attribute setters for QuantizedMatMul */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QINT32 */ /// public native @ByVal Attrs Toutput(@Cast("tensorflow::DataType") int x); /** If true, {@code a} is transposed before multiplication. * * Defaults to false */ /// public native @ByVal Attrs TransposeA(@Cast("bool") boolean x); /** If true, {@code b} is transposed before multiplication. * * Defaults to false */ /// public native @ByVal Attrs TransposeB(@Cast("bool") boolean x); /** The type of output produced by activation function * following this operation. * * Defaults to DT_QUINT8 */ public native @ByVal Attrs Tactivation(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int Toutput_(); public native Attrs Toutput_(int Toutput_); public native @Cast("bool") boolean transpose_a_(); public native Attrs transpose_a_(boolean transpose_a_); public native @Cast("bool") boolean transpose_b_(); public native Attrs transpose_b_(boolean transpose_b_); public native @Cast("tensorflow::DataType") int Tactivation_(); public native Attrs Tactivation_(int Tactivation_); } public QuantizedMatMul(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @ByVal Input min_a, @ByVal Input max_a, @ByVal Input min_b, @ByVal Input max_b) { super((Pointer)null); allocate(scope, a, b, min_a, max_a, min_b, max_b); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @ByVal Input min_a, @ByVal Input max_a, @ByVal Input min_b, @ByVal Input max_b); public QuantizedMatMul(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @ByVal Input min_a, @ByVal Input max_a, @ByVal Input min_b, @ByVal Input max_b, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, a, b, min_a, max_a, min_b, max_b, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @ByVal Input min_a, @ByVal Input max_a, @ByVal Input min_b, @ByVal Input max_b, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Toutput(@Cast("tensorflow::DataType") int x); public static native @ByVal Attrs TransposeA(@Cast("bool") boolean x); public static native @ByVal Attrs TransposeB(@Cast("bool") boolean x); public static native @ByVal Attrs Tactivation(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native QuantizedMatMul operation(Operation operation); public native @ByRef Output out(); public native QuantizedMatMul out(Output out); public native @ByRef Output min_out(); public native QuantizedMatMul min_out(Output min_out); public native @ByRef Output max_out(); public native QuantizedMatMul max_out(Output max_out); } /** Returns x * y element-wise, working on quantized buffers. * * Arguments: * * scope: A Scope object * * min_x: The float value that the lowest quantized {@code x} value represents. * * max_x: The float value that the highest quantized {@code x} value represents. * * min_y: The float value that the lowest quantized {@code y} value represents. * * max_y: The float value that the highest quantized {@code y} value represents. * * Returns: * * {@code Output} z * * {@code Output} min_z: The float value that the lowest quantized output value represents. 
* * {@code Output} max_z: The float value that the highest quantized output value represents. * * *NOTE*: {@code QuantizedMul} supports limited forms of broadcasting. More about * broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedMul(Pointer p) { super(p); } /** Optional attribute setters for QuantizedMul */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QINT32 */ public native @ByVal Attrs Toutput(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int Toutput_(); public native Attrs Toutput_(int Toutput_); } public QuantizedMul(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y) { super((Pointer)null); allocate(scope, x, y, min_x, max_x, min_y, max_y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y); public QuantizedMul(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, y, min_x, max_x, min_y, max_y, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y, @ByVal Input min_x, @ByVal Input max_x, @ByVal Input min_y, @ByVal Input max_y, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Toutput(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native QuantizedMul operation(Operation operation); public native @ByRef Output z(); public native QuantizedMul z(Output z); public native @ByRef Output min_z(); public native QuantizedMul min_z(Output min_z); public native @ByRef Output max_z(); public native QuantizedMul max_z(Output max_z); } /** Creates a sequence of numbers. * * This operation creates a sequence of numbers that begins at {@code start} and * extends by increments of {@code delta} up to but not including {@code limit}. * * For example: * *
 *  <pre>{@code
 *  # 'start' is 3
 *  # 'limit' is 18
 *  # 'delta' is 3
 *  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
 *  }</pre>
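 * 
 *  A minimal Java sketch of the same example (assumes {@code scope} and
 *  {@code Input}s start=3, limit=18, delta=3 built elsewhere):
 * 
 *  <pre>{@code
 *  Range range = new Range(scope, start, limit, delta);
 *  Output seq = range.output();  // 1-D: [3, 6, 9, 12, 15]
 *  }</pre>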
* * Arguments: * * scope: A Scope object * * start: 0-D (scalar). First entry in the sequence. * * limit: 0-D (scalar). Upper limit of sequence, exclusive. * * delta: 0-D (scalar). Optional. Default is 1. Number that increments {@code start}. * * Returns: * * {@code Output}: 1-D. */ @Namespace("tensorflow::ops") @NoOffset public static class Range extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Range(Pointer p) { super(p); } public Range(@Const @ByRef Scope scope, @ByVal Input start, @ByVal Input limit, @ByVal Input delta) { super((Pointer)null); allocate(scope, start, limit, delta); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input start, @ByVal Input limit, @ByVal Input delta); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Range operation(Operation operation); public native @ByRef Output output(); public native Range output(Output output); } /** Returns the real part of a complex number. * * Given a tensor {@code input} of complex numbers, this operation returns a tensor of * type {@code float} that is the real part of each element in {@code input}. All elements in * {@code input} must be complex numbers of the form \\(a + bj\\), where *a* is the real * part returned by this operation and *b* is the imaginary part. * * For example: * *
 *  <pre>{@code
 *  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
 *  tf.real(input) ==> [-2.25, 3.25]
 *  }</pre>
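 * 
 *  A hedged Java sketch; {@code scope} and the complex-valued {@code input} are
 *  assumed to exist, and {@code DT_DOUBLE} is assumed to be the DataType
 *  constant defined by these bindings:
 * 
 *  <pre>{@code
 *  Real real = new Real(scope, input);  // Tout defaults to DT_FLOAT
 *  Real realDouble = new Real(scope, input, Real.Tout(DT_DOUBLE));
 *  Output re = realDouble.output();
 *  }</pre>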
* * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Real extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Real(Pointer p) { super(p); } /** Optional attribute setters for Real */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_FLOAT */ public native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int Tout_(); public native Attrs Tout_(int Tout_); } public Real(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public Real(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Tout(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native Real operation(Operation operation); public native @ByRef Output output(); public native Real output(Output output); } /** Returns x / y element-wise for real types. * * If {@code x} and {@code y} are reals, this will return the floating-point division. * * *NOTE*: {@code Div} supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The z tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class RealDiv extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RealDiv(Pointer p) { super(p); } public RealDiv(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native RealDiv operation(Operation operation); public native @ByRef Output z(); public native RealDiv z(Output z); } /** Computes the reciprocal of x element-wise. * * I.e., \\(y = 1 / x\\). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Reciprocal extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Reciprocal(Pointer p) { super(p); } public Reciprocal(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Reciprocal operation(Operation operation); public native @ByRef Output y(); public native Reciprocal y(Output y); } /** Given a quantized tensor described by (input, input_min, input_max), outputs a * * range that covers the actual values present in that tensor. This op is * typically used to produce the requested_output_min and requested_output_max for * Requantize. * * Arguments: * * scope: A Scope object * * input_min: The float value that the minimum quantized input value represents. * * input_max: The float value that the maximum quantized input value represents. * * Returns: * * {@code Output} output_min: The computed min output. * * {@code Output} output_max: the computed max output. */ @Namespace("tensorflow::ops") @NoOffset public static class RequantizationRange extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RequantizationRange(Pointer p) { super(p); } public RequantizationRange(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max) { super((Pointer)null); allocate(scope, input, input_min, input_max); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max); public native @ByRef Operation operation(); public native RequantizationRange operation(Operation operation); public native @ByRef Output output_min(); public native RequantizationRange output_min(Output output_min); public native @ByRef Output output_max(); public native RequantizationRange output_max(Output output_max); } /** Convert the quantized 'input' tensor into a lower-precision 'output', using the * * output range specified with 'requested_output_min' and 'requested_output_max'. * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * * Arguments: * * scope: A Scope object * * input_min: The float value that the minimum quantized input value represents. * * input_max: The float value that the maximum quantized input value represents. * * requested_output_min: The float value that the minimum quantized output value represents. * * requested_output_max: The float value that the maximum quantized output value represents. * * out_type: The type of the output. Should be a lower bit depth than Tinput. * * Returns: * * {@code Output} output * * {@code Output} output_min: The requested_output_min value is copied into this output. * * {@code Output} output_max: The requested_output_max value is copied into this output. */ @Namespace("tensorflow::ops") @NoOffset public static class Requantize extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Requantize(Pointer p) { super(p); } public Requantize(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @ByVal Input requested_output_min, @ByVal Input requested_output_max, @Cast("tensorflow::DataType") int out_type) { super((Pointer)null); allocate(scope, input, input_min, input_max, requested_output_min, requested_output_max, out_type); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input input_min, @ByVal Input input_max, @ByVal Input requested_output_min, @ByVal Input requested_output_max, @Cast("tensorflow::DataType") int out_type); public native @ByRef Operation operation(); public native Requantize operation(Operation operation); public native @ByRef Output output(); public native Requantize output(Output output); public native @ByRef Output output_min(); public native Requantize output_min(Output output_min); public native @ByRef Output output_max(); public native Requantize output_max(Output output_max); } /** Returns element-wise integer closest to x. * * If the result is midway between two representable values, * the even representable is chosen. * For example: * *
 *  <pre>{@code
 *  rint(-1.5) ==> -2.0
 *  rint(0.5000001) ==> 1.0
 *  rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
 *  }</pre>
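 * 
 *  The same behaviour through the Java wrapper, as a hypothetical sketch
 *  ({@code scope} and {@code x} assumed built elsewhere):
 * 
 *  <pre>{@code
 *  Rint rint = new Rint(scope, x);
 *  Output y = rint.y();  // e.g. [-1.5, 0.5000001, 1.5] -> [-2.0, 1.0, 2.0]
 *  }</pre>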
* * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Rint extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Rint(Pointer p) { super(p); } public Rint(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Rint operation(Operation operation); public native @ByRef Output y(); public native Rint y(Output y); } /** Rounds the values of a tensor to the nearest integer, element-wise. * * Rounds half to even. Also known as bankers' rounding. If you want to round * according to the current system rounding mode use std::rint. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Round extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Round(Pointer p) { super(p); } public Round(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Round operation(Operation operation); public native @ByRef Output y(); public native Round y(Output y); } /** Computes reciprocal of square root of x element-wise. * * I.e., \\(y = 1 / \sqrt{x}\\). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The y tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Rsqrt extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Rsqrt(Pointer p) { super(p); } public Rsqrt(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Rsqrt operation(Operation operation); public native @ByRef Output y(); public native Rsqrt y(Output y); } /** Computes the maximum along segments of a tensor. * * Read * [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) * for an explanation of segments. * * Computes a tensor such that * \\(output_i = \max_j(data_j)\\) where {@code max} is over {@code j} such * that {@code segment_ids[j] == i}. * * If the max is empty for a given segment ID {@code i}, {@code output[i] = 0}. * *
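 * 
 *  A hedged Java sketch of a segmented max; {@code scope} is assumed valid and
 *  {@code data}, {@code segmentIds} are assumed to wrap the tensors
 *  [[1,2,3,4],[4,3,2,1],[5,6,7,8]] and [0, 0, 1]:
 * 
 *  <pre>{@code
 *  SegmentMax sm = new SegmentMax(scope, data, segmentIds);
 *  // Rows sharing segment ID 0 are reduced elementwise:
 *  // sm.output() -> [[4, 3, 3, 4], [5, 6, 7, 8]]
 *  }</pre>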
* * Arguments: * * scope: A Scope object * * segment_ids: A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. * * Returns: * * {@code Output}: Has same shape as data, except for dimension 0 which * has size {@code k}, the number of segments. */ @Namespace("tensorflow::ops") @NoOffset public static class SegmentMax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SegmentMax(Pointer p) { super(p); } public SegmentMax(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, segment_ids); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SegmentMax operation(Operation operation); public native @ByRef Output output(); public native SegmentMax output(Output output); } /** Computes the mean along segments of a tensor. * * Read * [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) * for an explanation of segments. * * Computes a tensor such that * \\(output_i = \frac{\sum_j data_j}{N}\\) where {@code mean} is * over {@code j} such that {@code segment_ids[j] == i} and {@code N} is the total number of * values summed. * * If the mean is empty for a given segment ID {@code i}, {@code output[i] = 0}. * *
* * Arguments: * * scope: A Scope object * * segment_ids: A 1-D tensor whose size is equal to the size of {@code data}'s * first dimension. Values should be sorted and can be repeated. * * Returns: * * {@code Output}: Has same shape as data, except for dimension 0 which * has size {@code k}, the number of segments. */ @Namespace("tensorflow::ops") @NoOffset public static class SegmentMean extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SegmentMean(Pointer p) { super(p); } public SegmentMean(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, segment_ids); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SegmentMean operation(Operation operation); public native @ByRef Output output(); public native SegmentMean output(Output output); } /** Computes the minimum along segments of a tensor. * * Read * [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation) * for an explanation of segments. * * Computes a tensor such that * \\(output_i = \min_j(data_j)\\) where {@code min} is over {@code j} such * that {@code segment_ids[j] == i}. * * If the min is empty for a given segment ID {@code i}, {@code output[i] = 0}. * *
/** Computes the minimum along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Computes a tensor such that
 *  \\(output_i = \min_j(data_j)\\) where {@code min} is over {@code j} such
 *  that {@code segment_ids[j] == i}.
 * 
 *  If the min is empty for a given segment ID {@code i}, {@code output[i] = 0}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A 1-D tensor whose size is equal to the size of {@code data}'s
 *  first dimension. Values should be sorted and can be repeated.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SegmentMin extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SegmentMin(Pointer p) { super(p); }
    public SegmentMin(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, segment_ids); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SegmentMin operation(Operation operation);
    public native @ByRef Output output(); public native SegmentMin output(Output output);
}
/** Computes the product along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Computes a tensor such that
 *  \\(output_i = \prod_j data_j\\) where the product is over {@code j} such
 *  that {@code segment_ids[j] == i}.
 * 
 *  If the product is empty for a given segment ID {@code i}, {@code output[i] = 1}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A 1-D tensor whose size is equal to the size of {@code data}'s
 *  first dimension. Values should be sorted and can be repeated.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SegmentProd extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SegmentProd(Pointer p) { super(p); }
    public SegmentProd(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, segment_ids); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SegmentProd operation(Operation operation);
    public native @ByRef Output output(); public native SegmentProd output(Output output);
}
/** Computes the sum along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Computes a tensor such that
 *  \\(output_i = \sum_j data_j\\) where sum is over {@code j} such
 *  that {@code segment_ids[j] == i}.
 * 
 *  If the sum is empty for a given segment ID {@code i}, {@code output[i] = 0}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A 1-D tensor whose size is equal to the size of {@code data}'s
 *  first dimension. Values should be sorted and can be repeated.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SegmentSum extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SegmentSum(Pointer p) { super(p); }
    public SegmentSum(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, segment_ids); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SegmentSum operation(Operation operation);
    public native @ByRef Output output(); public native SegmentSum output(Output output);
}
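/* Editorial example (not part of the generated bindings): a minimal sketch of how the
 * segment ops above are built from Java. It assumes Scope.NewRootScope(), Placeholder,
 * and the Input(Output) constructor declared elsewhere in this file; running the graph
 * through a session is omitted.
 *
 *   import static org.bytedeco.javacpp.tensorflow.*;
 *
 *   Scope scope = Scope.NewRootScope();
 *   // data: float matrix whose rows are grouped by segment_ids.
 *   Placeholder data = new Placeholder(scope.WithOpName("data"), DT_FLOAT);
 *   // segment_ids: sorted 1-D int32 tensor, one id per row of data.
 *   Placeholder ids = new Placeholder(scope.WithOpName("segment_ids"), DT_INT32);
 *   SegmentSum sum = new SegmentSum(scope,
 *           new Input(data.asOutput()), new Input(ids.asOutput()));
 *   // sum.output() has the same shape as data, except dimension 0 shrinks
 *   // to the number of segments.
 */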
/** Selects elements from {@code x} or {@code y}, depending on {@code condition}.
 * 
 *  The {@code x} and {@code y} tensors must all have the same shape, and the
 *  output will also have that shape.
 * 
 *  The {@code condition} tensor must be a scalar if {@code x} and {@code y} are scalars.
 *  If {@code x} and {@code y} are vectors or higher rank, then {@code condition} must be either a
 *  scalar, a vector with size matching the first dimension of {@code x}, or must have
 *  the same shape as {@code x}.
 * 
 *  The {@code condition} tensor acts as a mask that chooses, based on the value at each
 *  element, whether the corresponding element / row in the output should be
 *  taken from {@code x} (if true) or {@code y} (if false).
 * 
 *  If {@code condition} is a vector and {@code x} and {@code y} are higher rank matrices, then
 *  it chooses which row (outer dimension) to copy from {@code x} and {@code y}.
 *  If {@code condition} has the same shape as {@code x} and {@code y}, then it chooses which
 *  element to copy from {@code x} and {@code y}.
 * 
 *  For example:
 * 
 *  <pre>{@code python
 *  # 'condition' tensor is [[True,  False]
 *  #                        [False, True]]
 *  # 't' is [[1, 2],
 *  #         [3, 4]]
 *  # 'e' is [[5, 6],
 *  #         [7, 8]]
 *  select(condition, t, e)  # => [[1, 6], [7, 4]]
 * 
 * 
 *  # 'condition' tensor is [True, False]
 *  # 't' is [[1, 2],
 *  #         [3, 4]]
 *  # 'e' is [[5, 6],
 *  #         [7, 8]]
 *  select(condition, t, e) ==> [[1, 2],
 *                               [7, 8]]
 * 
 *  }</pre>
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * x: = A {@code Tensor} which may have the same shape as {@code condition}.
 *  If {@code condition} is rank 1, {@code x} may have higher rank,
 *  but its first dimension must match the size of {@code condition}.
 *  * y: = A {@code Tensor} with the same type and shape as {@code x}.
 * 
 *  Returns:
 *  * {@code Output}: = A {@code Tensor} with the same type and shape as {@code x} and {@code y}. */
@Namespace("tensorflow::ops") @NoOffset public static class Where3 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Where3(Pointer p) { super(p); }
    public Where3(@Const @ByRef Scope scope, @ByVal Input condition, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, condition, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input condition, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Where3 operation(Operation operation);
    public native @ByRef Output output(); public native Where3 output(Output output);
}
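/* Editorial example (not part of the generated bindings): the C++ Select op is exposed
 * here as Where3. This mirrors the Python example above; Scope, Placeholder, and the
 * Input(Output) constructor are assumed from elsewhere in this file.
 *
 *   import static org.bytedeco.javacpp.tensorflow.*;
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder condition = new Placeholder(scope.WithOpName("condition"), DT_BOOL);
 *   Placeholder t = new Placeholder(scope.WithOpName("t"), DT_FLOAT);
 *   Placeholder e = new Placeholder(scope.WithOpName("e"), DT_FLOAT);
 *   // Picks elements (or rows, when condition is a vector) from t where
 *   // condition is true and from e where it is false.
 *   Where3 select = new Where3(scope, new Input(condition.asOutput()),
 *           new Input(t.asOutput()), new Input(e.asOutput()));
 */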
/** Computes sigmoid of {@code x} element-wise.
 * 
 *  Specifically, {@code y = 1 / (1 + exp(-x))}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Sigmoid extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Sigmoid(Pointer p) { super(p); }
    public Sigmoid(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Sigmoid operation(Operation operation);
    public native @ByRef Output y(); public native Sigmoid y(Output y);
}

/** Returns an element-wise indication of the sign of a number.
 * 
 *  {@code y = sign(x) = -1} if {@code x < 0}; 0 if {@code x == 0}; 1 if {@code x > 0}.
 * 
 *  For complex numbers, {@code y = sign(x) = x / |x|} if {@code x != 0}, otherwise {@code y = 0}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Sign extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Sign(Pointer p) { super(p); }
    public Sign(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Sign operation(Operation operation);
    public native @ByRef Output y(); public native Sign y(Output y);
}

/** Computes sin of x element-wise.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Sin extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Sin(Pointer p) { super(p); }
    public Sin(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Sin operation(Operation operation);
    public native @ByRef Output y(); public native Sin y(Output y);
}

/** Computes hyperbolic sine of x element-wise.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Sinh extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Sinh(Pointer p) { super(p); }
    public Sinh(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Sinh operation(Operation operation);
    public native @ByRef Output y(); public native Sinh y(Output y);
}

/** Multiply matrix "a" by matrix "b".
 * 
 *  The inputs must be two-dimensional matrices and the inner dimension of "a" must
 *  match the outer dimension of "b". Both "a" and "b" must be {@code Tensor}s, not
 *  {@code SparseTensor}s. This op is optimized for the case where at least one of "a" or
 *  "b" is sparse, in the sense that they have a large proportion of zero values.
 *  The breakeven for using this versus a dense matrix multiply on one platform was
 *  30% zero values in the sparse matrix.
 * 
 *  The gradient computation of this operation will only take advantage of sparsity
 *  in the input gradient when that gradient comes from a Relu.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The product tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseMatMul extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseMatMul(Pointer p) { super(p); }
    /** Optional attribute setters for SparseMatMul */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to false */
        public native @ByVal Attrs TransposeA(@Cast("bool") boolean x);
        /** Defaults to false */
        public native @ByVal Attrs TransposeB(@Cast("bool") boolean x);
        /** Defaults to false */
        public native @ByVal Attrs AIsSparse(@Cast("bool") boolean x);
        /** Defaults to false */
        public native @ByVal Attrs BIsSparse(@Cast("bool") boolean x);

        public native @Cast("bool") boolean transpose_a_(); public native Attrs transpose_a_(boolean transpose_a_);
        public native @Cast("bool") boolean transpose_b_(); public native Attrs transpose_b_(boolean transpose_b_);
        public native @Cast("bool") boolean a_is_sparse_(); public native Attrs a_is_sparse_(boolean a_is_sparse_);
        public native @Cast("bool") boolean b_is_sparse_(); public native Attrs b_is_sparse_(boolean b_is_sparse_);
    }
    public SparseMatMul(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b) { super((Pointer)null); allocate(scope, a, b); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b);
    public SparseMatMul(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, a, b, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input a, @ByVal Input b, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public static native @ByVal Attrs TransposeA(@Cast("bool") boolean x);
    public static native @ByVal Attrs TransposeB(@Cast("bool") boolean x);
    public static native @ByVal Attrs AIsSparse(@Cast("bool") boolean x);
    public static native @ByVal Attrs BIsSparse(@Cast("bool") boolean x);
    public native @ByRef Operation operation(); public native SparseMatMul operation(Operation operation);
    public native @ByRef Output product(); public native SparseMatMul product(Output product);
}
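/* Editorial example (not part of the generated bindings): the Attrs pattern used by
 * SparseMatMul and other ops with optional attributes. A static setter such as
 * SparseMatMul.BIsSparse(...) returns an Attrs value whose instance setters chain, so
 * several attributes can be combined. Scope and Placeholder are assumed from elsewhere
 * in this file.
 *
 *   import static org.bytedeco.javacpp.tensorflow.*;
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder a = new Placeholder(scope.WithOpName("a"), DT_FLOAT);
 *   Placeholder b = new Placeholder(scope.WithOpName("b"), DT_FLOAT);
 *   // Declare b as the sparse operand and transpose it in the product.
 *   SparseMatMul.Attrs attrs = SparseMatMul.BIsSparse(true).TransposeB(true);
 *   SparseMatMul prod = new SparseMatMul(scope,
 *           new Input(a.asOutput()), new Input(b.asOutput()), attrs);
 */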
/** Computes the mean along sparse segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Like {@code SegmentMean}, but {@code segment_ids} can have rank less than {@code data}'s first
 *  dimension, selecting a subset of dimension 0, specified by {@code indices}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 1-D tensor. Has same rank as {@code segment_ids}.
 *  * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentMean extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentMean(Pointer p) { super(p); }
    public SparseSegmentMean(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, indices, segment_ids); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentMean operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentMean output(Output output);
}

/** Computes gradients for SparseSegmentMean.
 * 
 *  Returns tensor "output" with same shape as grad, except for dimension 0 whose
 *  value is output_dim0.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * grad: gradient propagated to the SparseSegmentMean op.
 *  * indices: indices passed to the corresponding SparseSegmentMean op.
 *  * segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
 *  * output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentMeanGrad extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentMeanGrad(Pointer p) { super(p); }
    public SparseSegmentMeanGrad(@Const @ByRef Scope scope, @ByVal Input grad, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input output_dim0) { super((Pointer)null); allocate(scope, grad, indices, segment_ids, output_dim0); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input grad, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input output_dim0);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentMeanGrad operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentMeanGrad output(Output output);
}

/** Computes the mean along sparse segments of a tensor.
 * 
 *  Like {@code SparseSegmentMean}, but allows missing ids in {@code segment_ids}. If an id is
 *  missing, the {@code output} tensor at that position will be zeroed.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 1-D tensor. Has same rank as {@code segment_ids}.
 *  * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
 *  * num_segments: Should equal the number of distinct segment IDs.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which has size
 *  {@code num_segments}. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentMeanWithNumSegments extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentMeanWithNumSegments(Pointer p) { super(p); }
    public SparseSegmentMeanWithNumSegments(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, indices, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentMeanWithNumSegments operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentMeanWithNumSegments output(Output output);
}

/** Computes the sum along sparse segments of a tensor divided by the sqrt of N.
 * 
 *  N is the size of the segment being reduced.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 1-D tensor. Has same rank as {@code segment_ids}.
 *  * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentSqrtN extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentSqrtN(Pointer p) { super(p); }
    public SparseSegmentSqrtN(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, indices, segment_ids); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentSqrtN operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentSqrtN output(Output output);
}

/** Computes gradients for SparseSegmentSqrtN.
 * 
 *  Returns tensor "output" with same shape as grad, except for dimension 0 whose
 *  value is output_dim0.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * grad: gradient propagated to the SparseSegmentSqrtN op.
 *  * indices: indices passed to the corresponding SparseSegmentSqrtN op.
 *  * segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
 *  * output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
 * 
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentSqrtNGrad extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentSqrtNGrad(Pointer p) { super(p); }
    public SparseSegmentSqrtNGrad(@Const @ByRef Scope scope, @ByVal Input grad, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input output_dim0) { super((Pointer)null); allocate(scope, grad, indices, segment_ids, output_dim0); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input grad, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input output_dim0);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentSqrtNGrad operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentSqrtNGrad output(Output output);
}

/** Computes the sum along sparse segments of a tensor divided by the sqrt of N.
 * 
 *  N is the size of the segment being reduced.
 * 
 *  Like {@code SparseSegmentSqrtN}, but allows missing ids in {@code segment_ids}. If an id is
 *  missing, the {@code output} tensor at that position will be zeroed.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 1-D tensor. Has same rank as {@code segment_ids}.
 *  * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
 *  * num_segments: Should equal the number of distinct segment IDs.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentSqrtNWithNumSegments extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentSqrtNWithNumSegments(Pointer p) { super(p); }
    public SparseSegmentSqrtNWithNumSegments(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, indices, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentSqrtNWithNumSegments operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentSqrtNWithNumSegments output(Output output);
}

/** Computes the sum along sparse segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Like {@code SegmentSum}, but {@code segment_ids} can have rank less than {@code data}'s first
 *  dimension, selecting a subset of dimension 0, specified by {@code indices}.
 * 
 *  For example:
 * 
 *  <pre>{@code python
 *  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
 * 
 *  # Select two rows, one segment.
 *  tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
 *  # => [[0 0 0 0]]
 * 
 *  # Select two rows, two segments.
 *  tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
 *  # => [[ 1  2  3  4]
 *  #     [-1 -2 -3 -4]]
 * 
 *  # Select all rows, two segments.
 *  tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
 *  # => [[0 0 0 0]
 *  #     [5 6 7 8]]
 * 
 *  # Which is equivalent to:
 *  tf.segment_sum(c, tf.constant([0, 0, 1]))
 *  }</pre>
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 1-D tensor. Has same rank as {@code segment_ids}.
 *  * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code k}, the number of segments. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentSum extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentSum(Pointer p) { super(p); }
    public SparseSegmentSum(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids) { super((Pointer)null); allocate(scope, data, indices, segment_ids); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentSum operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentSum output(Output output);
}

/** Computes the sum along sparse segments of a tensor.
 * 
 *  Like {@code SparseSegmentSum}, but allows missing ids in {@code segment_ids}. If an id is
 *  missing, the {@code output} tensor at that position will be zeroed.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  For example:
 * 
 *  <pre>{@code python
 *  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
 * 
 *  tf.sparse_segment_sum_with_num_segments(
 *      c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
 *  # => [[0 0 0 0]
 *  #     [0 0 0 0]
 *  #     [0 0 0 0]]
 * 
 *  tf.sparse_segment_sum_with_num_segments(c,
 *                                          tf.constant([0, 1]),
 *                                          tf.constant([0, 2]),
 *                                          num_segments=4)
 *  # => [[ 1  2  3  4]
 *  #     [ 0  0  0  0]
 *  #     [-1 -2 -3 -4]
 *  #     [ 0  0  0  0]]
 *  }</pre>
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * indices: A 1-D tensor. Has same rank as {@code segment_ids}.
 *  * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
 *  * num_segments: Should equal the number of distinct segment IDs.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for dimension 0 which
 *  has size {@code num_segments}. */
@Namespace("tensorflow::ops") @NoOffset public static class SparseSegmentSumWithNumSegments extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SparseSegmentSumWithNumSegments(Pointer p) { super(p); }
    public SparseSegmentSumWithNumSegments(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, indices, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input indices, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SparseSegmentSumWithNumSegments operation(Operation operation);
    public native @ByRef Output output(); public native SparseSegmentSumWithNumSegments output(Output output);
}

/** Computes square root of x element-wise.
 * 
 *  I.e., \\(y = \sqrt{x} = x^{1/2}\\).
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Sqrt extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Sqrt(Pointer p) { super(p); }
    public Sqrt(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Sqrt operation(Operation operation);
    public native @ByRef Output y(); public native Sqrt y(Output y);
}

/** Computes square of x element-wise.
 * 
 *  I.e., \\(y = x * x = x^2\\).
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Square extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Square(Pointer p) { super(p); }
    public Square(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Square operation(Operation operation);
    public native @ByRef Output y(); public native Square y(Output y);
}

/** Returns (x - y)(x - y) element-wise.
 * 
 *  *NOTE*: {@code SquaredDifference} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class SquaredDifference extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public SquaredDifference(Pointer p) { super(p); }
    public SquaredDifference(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native SquaredDifference operation(Operation operation);
    public native @ByRef Output z(); public native SquaredDifference z(Output z);
}

/** Returns x - y element-wise.
 * 
 *  *NOTE*: {@code Subtract} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor.
 * 
 *  Aliases:
 *  * Sub */
@Namespace("tensorflow::ops") @NoOffset public static class Subtract extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Subtract(Pointer p) { super(p); }
    public Subtract(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Subtract operation(Operation operation);
    public native @ByRef Output z(); public native Subtract z(Output z);
}

///
///
///
///
///

/** Computes the sum of elements across dimensions of a tensor.
 * 
 *  Reduces {@code input} along the dimensions given in {@code axis}. Unless
 *  {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in
 *  {@code axis}. If {@code keep_dims} is true, the reduced dimensions are
 *  retained with length 1.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: The tensor to reduce.
 *  * axis: The dimensions to reduce. Must be in the range
 *  {@code [-rank(input), rank(input))}.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * keep_dims: If true, retain reduced dimensions with length 1.
 * 
 *  Returns:
 *  * {@code Output}: The reduced tensor.
 * 
 *  Aliases:
 *  * ReduceSum */
@Namespace("tensorflow::ops") @NoOffset public static class Sum extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Sum(Pointer p) { super(p); }
    /** Optional attribute setters for Sum */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If true, retain reduced dimensions with length 1.
         * 
         *  Defaults to false */
        public native @ByVal Attrs KeepDims(@Cast("bool") boolean x);

        public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_);
    }
    public Sum(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis) { super((Pointer)null); allocate(scope, input, axis); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis);
    public Sum(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, axis, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input axis, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x);
    public native @ByRef Operation operation(); public native Sum operation(Operation operation);
    public native @ByRef Output output(); public native Sum output(Output output);
}
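/* Editorial example (not part of the generated bindings): reducing over one axis while
 * keeping its length-1 dimension, via the optional KeepDims attribute. Scope and
 * Placeholder are assumed from elsewhere in this file.
 *
 *   import static org.bytedeco.javacpp.tensorflow.*;
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder input = new Placeholder(scope.WithOpName("input"), DT_FLOAT);
 *   Placeholder axis = new Placeholder(scope.WithOpName("axis"), DT_INT32);
 *   // Without KeepDims(true) the reduced axis would be dropped from the shape.
 *   Sum sum = new Sum(scope, new Input(input.asOutput()),
 *           new Input(axis.asOutput()), Sum.KeepDims(true));
 */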
///
///

/** Computes tan of x element-wise.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Tan extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Tan(Pointer p) { super(p); }
    public Tan(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Tan operation(Operation operation);
    public native @ByRef Output y(); public native Tan y(Output y);
}

/** Computes hyperbolic tangent of {@code x} element-wise.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The y tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Tanh extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Tanh(Pointer p) { super(p); }
    public Tanh(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Tanh operation(Operation operation);
    public native @ByRef Output y(); public native Tanh y(Output y);
}

/** Returns x / y element-wise for integer types.
 * 
 *  Truncation designates that negative numbers will round fractional quantities
 *  toward zero. I.e. -7 / 5 = -1. This matches C semantics but differs from
 *  Python semantics. See {@code FloorDiv} for a division function that matches
 *  Python semantics.
 * 
 *  *NOTE*: {@code TruncateDiv} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class TruncateDiv extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TruncateDiv(Pointer p) { super(p); }
    public TruncateDiv(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native TruncateDiv operation(Operation operation);
    public native @ByRef Output z(); public native TruncateDiv z(Output z);
}

/** Returns element-wise remainder of division. This emulates C semantics in that
 * 
 *  the result here is consistent with a truncating divide. E.g.
 *  {@code truncate(x / y) * y + truncate_mod(x, y) = x}.
 * 
 *  *NOTE*: {@code TruncateMod} supports broadcasting. More about broadcasting
 *  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class TruncateMod extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TruncateMod(Pointer p) { super(p); }
    public TruncateMod(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native TruncateMod operation(Operation operation);
    public native @ByRef Output z(); public native TruncateMod z(Output z);
}
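/* Worked example (editorial): Java's integer arithmetic already truncates toward zero,
 * so it mirrors the TruncateDiv/TruncateMod semantics above, unlike Python's floor
 * division.
 *
 *   int q = -7 / 5;           // -1 under truncation (floor division gives -2)
 *   int r = -7 % 5;           // -2 under truncation (floor modulus gives 3)
 *   assert q * 5 + r == -7;   // truncate(x / y) * y + truncate_mod(x, y) == x
 */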
/** Computes the maximum along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  This operator is similar to the unsorted segment sum operator found
 *  [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
 *  Instead of computing the sum over segments, it computes the maximum such that:
 * 
 *  \\(output_i = \max_{j...} data[j...]\\) where max is over tuples {@code j...} such
 *  that {@code segment_ids[j...] == i}.
 * 
 *  If the maximum is empty for a given segment ID {@code i}, it outputs the smallest
 *  possible value for the specific numeric type,
 *  {@code output[i] = numeric_limits<T>::lowest()}.
 * 
 *  If the given segment ID {@code i} is negative, then the corresponding value is
 *  dropped, and will not be included in the result.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A tensor whose shape is a prefix of {@code data.shape}.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for the first {@code segment_ids.rank}
 *  dimensions, which are replaced with a single dimension which has size
 *  {@code num_segments}. */
@Namespace("tensorflow::ops") @NoOffset public static class UnsortedSegmentMax extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnsortedSegmentMax(Pointer p) { super(p); }
    public UnsortedSegmentMax(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native UnsortedSegmentMax operation(Operation operation);
    public native @ByRef Output output(); public native UnsortedSegmentMax output(Output output);
}

/** Computes the minimum along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
 *  for an explanation of segments.
 * 
 *  This operator is similar to the unsorted segment sum operator found
 *  [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
 *  Instead of computing the sum over segments, it computes the minimum such that:
 * 
 *  \\(output_i = \min_{j...} data[j...]\\) where min is over tuples {@code j...} such
 *  that {@code segment_ids[j...] == i}.
 * 
 *  If the minimum is empty for a given segment ID {@code i}, it outputs the largest
 *  possible value for the specific numeric type,
 *  {@code output[i] = numeric_limits<T>::max()}.
 * 
 *  If the given segment ID {@code i} is negative, then the corresponding value is
 *  dropped, and will not be included in the result.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A tensor whose shape is a prefix of {@code data.shape}.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for the first {@code segment_ids.rank}
 *  dimensions, which are replaced with a single dimension which has size
 *  {@code num_segments}. */
@Namespace("tensorflow::ops") @NoOffset public static class UnsortedSegmentMin extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnsortedSegmentMin(Pointer p) { super(p); }
    public UnsortedSegmentMin(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native UnsortedSegmentMin operation(Operation operation);
    public native @ByRef Output output(); public native UnsortedSegmentMin output(Output output);
}

/** Computes the product along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
 *  for an explanation of segments.
 * 
 *  This operator is similar to the unsorted segment sum operator found
 *  [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
 *  Instead of computing the sum over segments, it computes the product of all
 *  entries belonging to a segment such that:
 * 
 *  \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
 *  {@code j...} such that {@code segment_ids[j...] == i}.
 * 
 *  If there is no entry for a given segment ID {@code i}, it outputs 1.
 * 
 *  If the given segment ID {@code i} is negative, then the corresponding value is
 *  dropped, and will not be included in the result.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A tensor whose shape is a prefix of {@code data.shape}.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for the first {@code segment_ids.rank}
 *  dimensions, which are replaced with a single dimension which has size
 *  {@code num_segments}. */
@Namespace("tensorflow::ops") @NoOffset public static class UnsortedSegmentProd extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnsortedSegmentProd(Pointer p) { super(p); }
    public UnsortedSegmentProd(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native UnsortedSegmentProd operation(Operation operation);
    public native @ByRef Output output(); public native UnsortedSegmentProd output(Output output);
}

/** Computes the sum along segments of a tensor.
 * 
 *  Read
 *  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
 *  for an explanation of segments.
 * 
 *  Computes a tensor such that
 *  \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples {@code j...} such
 *  that {@code segment_ids[j...] == i}. Unlike {@code SegmentSum}, {@code segment_ids}
 *  need not be sorted and need not cover all values in the full
 *  range of valid values.
 * 
 *  If the sum is empty for a given segment ID {@code i}, {@code output[i] = 0}.
 *  If the given segment ID {@code i} is negative, the value is dropped and will not be
 *  added to the sum of the segment.
 * 
 *  {@code num_segments} should equal the number of distinct segment IDs.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * segment_ids: A tensor whose shape is a prefix of {@code data.shape}.
 * 
 *  Returns:
 *  * {@code Output}: Has same shape as data, except for the first {@code segment_ids.rank}
 *  dimensions, which are replaced with a single dimension which has size
 *  {@code num_segments}. */
@Namespace("tensorflow::ops") @NoOffset public static class UnsortedSegmentSum extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public UnsortedSegmentSum(Pointer p) { super(p); }
    public UnsortedSegmentSum(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments) { super((Pointer)null); allocate(scope, data, segment_ids, num_segments); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input data, @ByVal Input segment_ids, @ByVal Input num_segments);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native UnsortedSegmentSum operation(Operation operation);
    public native @ByRef Output output(); public native UnsortedSegmentSum output(Output output);
}
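/* Editorial example (not part of the generated bindings): unlike SegmentSum,
 * UnsortedSegmentSum takes the segment count as a third input, and segment_ids need
 * not be sorted. Scope and Placeholder are assumed from elsewhere in this file.
 *
 *   import static org.bytedeco.javacpp.tensorflow.*;
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder data = new Placeholder(scope.WithOpName("data"), DT_FLOAT);
 *   Placeholder ids = new Placeholder(scope.WithOpName("segment_ids"), DT_INT32);
 *   Placeholder numSegments = new Placeholder(scope.WithOpName("num_segments"), DT_INT32);
 *   UnsortedSegmentSum sum = new UnsortedSegmentSum(scope, new Input(data.asOutput()),
 *           new Input(ids.asOutput()), new Input(numSegments.asOutput()));
 */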
/** Returns 0 if x == 0, and x / y otherwise, elementwise.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Xdivy extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Xdivy(Pointer p) { super(p); }
    public Xdivy(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Xdivy operation(Operation operation);
    public native @ByRef Output z(); public native Xdivy z(Output z);
}

/** Returns 0 if x == 0, and x * log(y) otherwise, elementwise.
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Xlogy extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Xlogy(Pointer p) { super(p); }
    public Xlogy(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y) { super((Pointer)null); allocate(scope, x, y); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input y);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Xlogy operation(Operation operation);
    public native @ByRef Output z(); public native Xlogy z(Output z);
}

/** Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
 * 
 *  The Hurwitz zeta function is defined as:
 * 
 *  \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
 * 
 *  Arguments:
 *  * scope: A Scope object
 * 
 *  Returns:
 *  * {@code Output}: The z tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class Zeta extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Zeta(Pointer p) { super(p); }
    public Zeta(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input q) { super((Pointer)null); allocate(scope, x, q); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input q);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public native @ByRef Operation operation(); public native Zeta operation(Operation operation);
    public native @ByRef Output z(); public native Zeta z(Output z);
}

/** \} */

// namespace ops
// namespace tensorflow

// #endif // TENSORFLOW_CC_OPS_MATH_OPS_H_


// Parsed from tensorflow/cc/ops/nn_ops.h

// This file is MACHINE GENERATED! Do not edit.

// #ifndef TENSORFLOW_CC_OPS_NN_OPS_H_
// #define TENSORFLOW_CC_OPS_NN_OPS_H_

// This file is MACHINE GENERATED! Do not edit.

// #include "tensorflow/cc/framework/ops.h"
// #include "tensorflow/cc/framework/scope.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/framework/tensor_shape.h"
// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"

/** \defgroup nn_ops Nn Ops
 *  \{
 * 
 *  Performs average pooling on the input.
 * 
 *  Each entry in {@code output} is the mean of the corresponding size {@code ksize}
 *  window in {@code value}.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * value: 4-D with shape {@code [batch, height, width, channels]}.
 *  * ksize: The size of the sliding window for each dimension of {@code value}.
 *  * strides: The stride of the sliding window for each dimension of {@code value}.
 *  * padding: The type of padding algorithm to use.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * data_format: Specify the data format of the input and output data. With the
 *  default format "NHWC", the data is stored in the order of:
 *  [batch, in_height, in_width, in_channels].
 *  Alternatively, the format could be "NCHW", the data storage order of:
 *  [batch, in_channels, in_height, in_width].
 * 
 *  Returns:
 *  * {@code Output}: The average pooled output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class AvgPool extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public AvgPool(Pointer p) { super(p); }
    /** Optional attribute setters for AvgPool */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Specify the data format of the input and output data. With the
         *  default format "NHWC", the data is stored in the order of:
         *  [batch, in_height, in_width, in_channels].
         *  Alternatively, the format could be "NCHW", the data storage order of:
         *  [batch, in_channels, in_height, in_width].
* * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, value, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, value, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, value, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, value, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, value, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, value, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); 
    public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs);
    public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs);
    public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs);
    public AvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();
    public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x);
    public static native @ByVal Attrs DataFormat(@StringPiece String x);
    public native @ByRef Operation operation(); public native AvgPool operation(Operation operation);
    public native @ByRef Output output(); public native AvgPool output(Output output);
}
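/* Editorial example (not part of the generated bindings): 2x2 average pooling with
 * stride 2 over NHWC images, using the int[] constructor overload and the optional
 * DataFormat attribute. Scope and Placeholder are assumed from elsewhere in this file.
 *
 *   import static org.bytedeco.javacpp.tensorflow.*;
 *
 *   Scope scope = Scope.NewRootScope();
 *   Placeholder images = new Placeholder(scope.WithOpName("images"), DT_FLOAT);
 *   AvgPool pool = new AvgPool(scope, new Input(images.asOutput()),
 *           new int[] {1, 2, 2, 1},   // ksize: pooling window per dimension of value
 *           new int[] {1, 2, 2, 1},   // strides: step per dimension of value
 *           "SAME",                   // padding algorithm
 *           AvgPool.DataFormat("NHWC"));
 */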
*/ public AvgPool3D(Pointer p) { super(p); } /** Optional attribute setters for AvgPool3D */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Defaults to "NDHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, 
@ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native AvgPool3D operation(Operation operation); public native @ByRef Output output(); public native AvgPool3D output(Output output); } /** Computes gradients of average pooling function. * * Arguments: * * scope: A Scope object * * orig_input_shape: The original input dimensions. * * grad: Output backprop of shape {@code [batch, depth, rows, cols, channels]}. 
* * ksize: 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Returns: * * {@code Output}: The backprop for input. */ @Namespace("tensorflow::ops") @NoOffset public static class AvgPool3DGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3DGrad(Pointer p) { super(p); } /** Optional attribute setters for AvgPool3DGrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. 
* * Defaults to "NDHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, 
@ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public AvgPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input_shape, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input_shape, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native AvgPool3DGrad operation(Operation operation); public native @ByRef Output output(); public native AvgPool3DGrad output(Output output); } /** Adds {@code bias} to {@code value}. * * This is a special case of {@code tf.add} where {@code bias} is restricted to be 1-D. * Broadcasting is supported, so {@code value} may have any number of dimensions. 
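 *
 * For example (an illustrative sketch added to this doc, not generated from the
 * op definition): with {@code value} of shape {@code [2, 3]} and
 * {@code bias = [b0, b1, b2]}, the result is {@code value[i, j] + bias[j]} for
 * every row {@code i}. In graph-building code, assuming a {@code Scope root}
 * and the {@code Placeholder} op declared elsewhere in this file:
 * <pre>{@code
 * Placeholder value = new Placeholder(root, DT_FLOAT); // e.g. [batch, channels]
 * Placeholder bias  = new Placeholder(root, DT_FLOAT); // [channels]
 * BiasAdd sum = new BiasAdd(root, value.asInput(), bias.asInput(),
 *     new BiasAdd.Attrs().DataFormat("NHWC"));         // optional attrs
 * }</pre>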
* * Arguments: * * scope: A Scope object * * value: Any number of dimensions. * * bias: 1-D with size the last dimension of {@code value}. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * The tensor will be added to "in_channels", the third-to-the-last * dimension. * * Returns: * * {@code Output}: Broadcasted sum of {@code value} and {@code bias}. */ @Namespace("tensorflow::ops") @NoOffset public static class BiasAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BiasAdd(Pointer p) { super(p); } /** Optional attribute setters for BiasAdd */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * The tensor will be added to "in_channels", the third-to-the-last * dimension. * * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public BiasAdd(@Const @ByRef Scope scope, @ByVal Input value, @ByVal Input bias) { super((Pointer)null); allocate(scope, value, bias); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ByVal Input bias); public BiasAdd(@Const @ByRef Scope scope, @ByVal Input value, @ByVal Input bias, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, bias, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ByVal Input bias, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native BiasAdd operation(Operation operation); public native @ByRef Output output(); public native BiasAdd output(Output output); } /** The backward operation for "BiasAdd" on the "bias" tensor. * * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. * * Arguments: * * scope: A Scope object * * out_backprop: Any number of dimensions. 
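 *
 * Worked illustration (added for clarity, not part of the generated op doc):
 * for an NHWC {@code out_backprop} of shape {@code [batch, h, w, c]} this op
 * returns {@code grad_bias[k] = sum_{b, i, j} out_backprop[b, i, j, k]}, a 1-D
 * tensor of length {@code c}. Assuming a {@code Scope root} and an upstream
 * gradient {@code outBackprop} built elsewhere:
 * <pre>{@code
 * BiasAddGrad gradBias = new BiasAddGrad(root, outBackprop.asInput(),
 *     new BiasAddGrad.Attrs().DataFormat("NHWC"));
 * }</pre>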
* * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * The tensor will be added to "in_channels", the third-to-the-last * dimension. * * Returns: * * {@code Output}: 1-D with size the feature dimension of {@code out_backprop}. */ @Namespace("tensorflow::ops") @NoOffset public static class BiasAddGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BiasAddGrad(Pointer p) { super(p); } /** Optional attribute setters for BiasAddGrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * The tensor will be added to "in_channels", the third-to-the-last * dimension. * * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public BiasAddGrad(@Const @ByRef Scope scope, @ByVal Input out_backprop) { super((Pointer)null); allocate(scope, out_backprop); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input out_backprop); public BiasAddGrad(@Const @ByRef Scope scope, @ByVal Input out_backprop, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, out_backprop, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input out_backprop, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native BiasAddGrad operation(Operation operation); public native @ByRef Output output(); public native BiasAddGrad output(Output output); } /** Computes a 2-D convolution given 4-D {@code input} and {@code filter} tensors. * * Given an input tensor of shape {@code [batch, in_height, in_width, in_channels]} * and a filter / kernel tensor of shape * {@code [filter_height, filter_width, in_channels, out_channels]}, this op * performs the following: * * 1. Flattens the filter to a 2-D matrix with shape * {@code [filter_height * filter_width * in_channels, output_channels]}. * 2. 
Extracts image patches from the input tensor to form a *virtual*
 * tensor of shape {@code [batch, out_height, out_width,
 * filter_height * filter_width * in_channels]}.
 * 3. For each patch, right-multiplies the filter matrix and the image patch
 * vector.
 *
 * In detail, with the default NHWC format,
 *
 *     output[b, i, j, k] =
 *         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
 *                         filter[di, dj, q, k]
 *
 * Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same
 * horizontal and vertical strides, {@code strides = [1, stride, stride, 1]}.
 *
 * Arguments:
 * * scope: A Scope object
 * * input: A 4-D tensor. The dimension order is interpreted according to the value
 * of {@code data_format}, see below for details.
 * * filter: A 4-D tensor of shape
 * {@code [filter_height, filter_width, in_channels, out_channels]}
 * * strides: 1-D tensor of length 4. The stride of the sliding window for each
 * dimension of {@code input}. The dimension order is determined by the value of
 * {@code data_format}, see below for details.
 * * padding: The type of padding algorithm to use.
 *
 * Optional attributes (see {@code Attrs}):
 * * data_format: Specify the data format of the input and output data. With the
 * default format "NHWC", the data is stored in the order of:
 * [batch, height, width, channels].
 * Alternatively, the format could be "NCHW", the data storage order of:
 * [batch, channels, height, width].
 * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
 * {@code input}. If set to k > 1, there will be k-1 skipped cells between each
 * filter element on that dimension. The dimension order is determined by the
 * value of {@code data_format}, see above for details. Dilations in the batch and
 * depth dimensions must be 1.
 *
 * Returns:
 * * {@code Output}: A 4-D tensor. The dimension order is determined by the value of
 * {@code data_format}, see below for details. */
@Namespace("tensorflow::ops") @NoOffset public static class Conv2D extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Conv2D(Pointer p) { super(p); }

    /** Optional attribute setters for Conv2D */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** Defaults to true */
        ///
        public native @ByVal Attrs UseCudnnOnGpu(@Cast("bool") boolean x);

        /** Specify the data format of the input and output data. With the
         * default format "NHWC", the data is stored in the order of:
         * [batch, height, width, channels].
         * Alternatively, the format could be "NCHW", the data storage order of:
         * [batch, channels, height, width].
         *
         * Defaults to "NHWC" */
        ///
        public native @ByVal Attrs DataFormat(@StringPiece BytePointer x);
        public native @ByVal Attrs DataFormat(@StringPiece String x);

        /** 1-D tensor of length 4. The dilation factor for each dimension of
         * {@code input}. If set to k > 1, there will be k-1 skipped cells between each
         * filter element on that dimension.
The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... x); public native @Cast("bool") boolean use_cudnn_on_gpu_(); public native Attrs use_cudnn_on_gpu_(boolean use_cudnn_on_gpu_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, 
@StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseCudnnOnGpu(@Cast("bool") boolean x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... x); public native @ByRef Operation operation(); public native Conv2D operation(Operation operation); public native @ByRef Output output(); public native Conv2D output(Output output); } /** Computes the gradients of convolution with respect to the filter. * * Arguments: * * scope: A Scope object * * input: 4-D with shape {@code [batch, in_height, in_width, in_channels]}. * * filter_sizes: An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 4-D * {@code [filter_height, filter_width, in_channels, out_channels]} tensor. * * out_backprop: 4-D with shape {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * * strides: The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. 
* * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Returns: * * {@code Output}: 4-D with shape * {@code [filter_height, filter_width, in_channels, out_channels]}. Gradient w.r.t. * the {@code filter} input of the convolution. */ @Namespace("tensorflow::ops") @NoOffset public static class Conv2DBackpropFilter extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2DBackpropFilter(Pointer p) { super(p); } /** Optional attribute setters for Conv2DBackpropFilter */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to true */ /// public native @ByVal Attrs UseCudnnOnGpu(@Cast("bool") boolean x); /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @Cast("bool") boolean use_cudnn_on_gpu_(); public native Attrs use_cudnn_on_gpu_(boolean use_cudnn_on_gpu_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } 
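    // -----------------------------------------------------------------------
    // Illustrative sketch only -- not part of the generated bindings. Computes
    // the filter gradient of a 3x3 convolution; `root`, `images` (the forward
    // NHWC input), and `gradOut` (gradient w.r.t. the conv output) are assumed
    // to be built elsewhere with the ops in this file:
    //
    //   Placeholder filterSizes = new Placeholder(root, DT_INT32); // e.g. {3, 3, inC, outC}
    //   Conv2DBackpropFilter dFilter = new Conv2DBackpropFilter(root,
    //       images.asInput(),          // forward-pass input
    //       filterSizes.asInput(),     // shape of the filter tensor
    //       gradOut.asInput(),         // out_backprop
    //       new int[]{1, 1, 1, 1},     // strides
    //       "SAME");                   // padding
    //   // dFilter.output() has shape [filter_height, filter_width, inC, outC].
    // -----------------------------------------------------------------------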
private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseCudnnOnGpu(@Cast("bool") boolean x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @ByRef Operation operation(); public native Conv2DBackpropFilter operation(Operation operation); public native @ByRef Output output(); public native Conv2DBackpropFilter output(Output output); } /** Computes the gradients of convolution with respect to the input. * * Arguments: * * scope: A Scope object * * input_sizes: An integer vector representing the shape of {@code input}, * where {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. * * filter: 4-D with shape * {@code [filter_height, filter_width, in_channels, out_channels]}. * * out_backprop: 4-D with shape {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * * strides: The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Returns: * * {@code Output}: 4-D with shape {@code [batch, in_height, in_width, in_channels]}. Gradient * w.r.t. the input of the convolution. */ @Namespace("tensorflow::ops") @NoOffset public static class Conv2DBackpropInput extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2DBackpropInput(Pointer p) { super(p); } /** Optional attribute setters for Conv2DBackpropInput */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to true */ /// public native @ByVal Attrs UseCudnnOnGpu(@Cast("bool") boolean x); /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. 
* * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... x); public native @Cast("bool") boolean use_cudnn_on_gpu_(); public native Attrs use_cudnn_on_gpu_(boolean use_cudnn_on_gpu_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input 
out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseCudnnOnGpu(@Cast("bool") boolean x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs 
Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... x); public native @ByRef Operation operation(); public native Conv2DBackpropInput operation(Operation operation); public native @ByRef Output output(); public native Conv2DBackpropInput output(Output output); } /** Computes a 3-D convolution given 5-D {@code input} and {@code filter} tensors. * * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. * * Our Conv3D implements a form of cross-correlation. * * Arguments: * * scope: A Scope object * * input: Shape {@code [batch, in_depth, in_height, in_width, in_channels]}. * * filter: Shape {@code [filter_depth, filter_height, filter_width, in_channels, * out_channels]}. {@code in_channels} must match between {@code input} and {@code filter}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * dilations: 1-D tensor of length 5. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Conv3D extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3D(Pointer p) { super(p); } /** Optional attribute setters for Conv3D */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Defaults to "NDHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 5. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. 
The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Defaults to [1, 1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, 
padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... x); public native @ByRef Operation operation(); public native Conv3D operation(Operation operation); public native @ByRef Output output(); public native Conv3D output(Output output); } /** Computes the gradients of 3-D convolution with respect to the filter. * * Arguments: * * scope: A Scope object * * input: Shape {@code [batch, depth, rows, cols, in_channels]}. * * filter_sizes: An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 5-D * {@code [filter_depth, filter_height, filter_width, in_channels, out_channels]} * tensor. * * out_backprop: Backprop signal of shape {@code [batch, out_depth, out_rows, out_cols, * out_channels]}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. 
With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * dilations: 1-D tensor of length 5. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Conv3DBackpropFilterV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3DBackpropFilterV2(Pointer p) { super(p); } /** Optional attribute setters for Conv3DBackpropFilterV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Defaults to "NDHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 5. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Defaults to [1, 1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal 
Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3DBackpropFilterV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @ByRef Operation operation(); public native Conv3DBackpropFilterV2 operation(Operation operation); public native @ByRef Output output(); public native Conv3DBackpropFilterV2 output(Output output); } /** Computes the gradients of 3-D convolution with respect to the input. * * Arguments: * * scope: A Scope object * * input_sizes: An integer vector representing the tensor shape of {@code input}, * where {@code input} is a 5-D * {@code [batch, depth, rows, cols, in_channels]} tensor. * * filter: Shape {@code [depth, rows, cols, in_channels, out_channels]}. * {@code in_channels} must match between {@code input} and {@code filter}. * * out_backprop: Backprop signal of shape {@code [batch, out_depth, out_rows, out_cols, * out_channels]}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * dilations: 1-D tensor of length 5. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Conv3DBackpropInputV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3DBackpropInputV2(Pointer p) { super(p); } /** Optional attribute setters for Conv3DBackpropInputV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Defaults to "NDHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 5. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. 
* * Defaults to [1, 1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { 
super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public Conv3DBackpropInputV2(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @ByRef Operation operation(); public native Conv3DBackpropInputV2 operation(Operation operation); public native @ByRef Output output(); public native Conv3DBackpropInputV2 output(Output output); } /** Returns the dimension index in the destination data format given the one in * * the source data format. * * Arguments: * * scope: A Scope object * * x: A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). * * Optional attributes (see {@code Attrs}): * * src_format: source data format. * * dst_format: destination data format. * * Returns: * * {@code Output}: A Tensor with each element as a dimension index in destination data format. */ @Namespace("tensorflow::ops") @NoOffset public static class DataFormatDimMap extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DataFormatDimMap(Pointer p) { super(p); } /** Optional attribute setters for DataFormatDimMap */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** source data format. * * Defaults to "NHWC" */ /// public native @ByVal Attrs SrcFormat(@StringPiece BytePointer x); public native @ByVal Attrs SrcFormat(@StringPiece String x); /** destination data format. * * Defaults to "NCHW" */ public native @ByVal Attrs DstFormat(@StringPiece BytePointer x); public native @ByVal Attrs DstFormat(@StringPiece String x); public native @StringPiece BytePointer src_format_(); public native Attrs src_format_(BytePointer src_format_); public native @StringPiece BytePointer dst_format_(); public native Attrs dst_format_(BytePointer dst_format_); } public DataFormatDimMap(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public DataFormatDimMap(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs SrcFormat(@StringPiece BytePointer x); public static native @ByVal Attrs SrcFormat(@StringPiece String x); public static native @ByVal Attrs DstFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DstFormat(@StringPiece String x); public native @ByRef Operation operation(); public native DataFormatDimMap operation(Operation operation); public native @ByRef Output y(); public native DataFormatDimMap y(Output y); } /** Returns the permuted vector/tensor in the destination data format given the * * one in the source data format. * * Arguments: * * scope: A Scope object * * x: Vector of size 4 or Tensor of shape (4, 2) in source data format. 
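 *
 *  For example (an illustrative case using the default formats, not output
 *  taken from the op itself): with {@code src_format = "NHWC"} and
 *  {@code dst_format = "NCHW"}, an input vector {@code [n, h, w, c]} is
 *  permuted to {@code [n, c, h, w]}.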
* * Optional attributes (see {@code Attrs}): * * src_format: source data format. * * dst_format: destination data format. * * Returns: * * {@code Output}: Vector of size 4 or Tensor of shape (4, 2) in destination data format. */ @Namespace("tensorflow::ops") @NoOffset public static class DataFormatVecPermute extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DataFormatVecPermute(Pointer p) { super(p); } /** Optional attribute setters for DataFormatVecPermute */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** source data format. * * Defaults to "NHWC" */ /// public native @ByVal Attrs SrcFormat(@StringPiece BytePointer x); public native @ByVal Attrs SrcFormat(@StringPiece String x); /** destination data format. * * Defaults to "NCHW" */ public native @ByVal Attrs DstFormat(@StringPiece BytePointer x); public native @ByVal Attrs DstFormat(@StringPiece String x); public native @StringPiece BytePointer src_format_(); public native Attrs src_format_(BytePointer src_format_); public native @StringPiece BytePointer dst_format_(); public native Attrs dst_format_(BytePointer dst_format_); } public DataFormatVecPermute(@Const @ByRef Scope scope, @ByVal Input x) { super((Pointer)null); allocate(scope, x); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x); public DataFormatVecPermute(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs SrcFormat(@StringPiece BytePointer x); public static native @ByVal Attrs SrcFormat(@StringPiece String x); public static native @ByVal Attrs DstFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DstFormat(@StringPiece String x); public native @ByRef Operation operation(); public native DataFormatVecPermute operation(Operation operation); public native @ByRef Output y(); public native DataFormatVecPermute y(Output y); } /** Computes a 2-D depthwise convolution given 4-D {@code input} and {@code filter} tensors. * * Given an input tensor of shape {@code [batch, in_height, in_width, in_channels]} * and a filter / kernel tensor of shape * {@code [filter_height, filter_width, in_channels, channel_multiplier]}, containing * {@code in_channels} convolutional filters of depth 1, {@code depthwise_conv2d} applies * a different filter to each input channel (expanding from 1 channel to * {@code channel_multiplier} channels for each), then concatenates the results * together. Thus, the output has {@code in_channels * channel_multiplier} channels. * *

 *  <pre>{@code
 *  for k in 0..in_channels-1
 *    for q in 0..channel_multiplier-1
 *      output[b, i, j, k * channel_multiplier + q] =
 *        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
 *                          filter[di, dj, k, q]
 *  }</pre>
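 *
 *  For example (illustrative numbers, not defaults of the op): with
 *  {@code in_channels = 8} and {@code channel_multiplier = 4}, each input
 *  channel is expanded into 4 output channels, so the output has
 *  {@code 8 * 4 = 32} channels.
 *
 *  A minimal graph-construction sketch using these bindings (assumptions: the
 *  {@code Scope} and {@code Placeholder} wrappers declared elsewhere in this
 *  file; the shapes, strides, and padding shown are illustrative):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder input  = new Placeholder(scope, DT_FLOAT);  // e.g. [1, 32, 32, 8]
 *  Placeholder filter = new Placeholder(scope, DT_FLOAT);  // e.g. [3, 3, 8, 4]
 *  DepthwiseConv2dNative conv = new DepthwiseConv2dNative(scope,
 *      input.asInput(), filter.asInput(), new int[] {1, 1, 1, 1}, "SAME");
 *  Output result = conv.output();  // channels: 8 * 4 = 32
 *  }</pre>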
* * Must have {@code strides[0] = strides[3] = 1}. For the most common case of the same * horizontal and vertical strides, {@code strides = [1, stride, stride, 1]}. * * Arguments: * * scope: A Scope object * * strides: 1-D of length 4. The stride of the sliding window for each dimension * of {@code input}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, channels, height, width]. * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class DepthwiseConv2dNative extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DepthwiseConv2dNative(Pointer p) { super(p); } /** Optional attribute setters for DepthwiseConv2dNative */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, channels, height, width]. * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNative(@Const @ByRef 
Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNative(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... x); public native @ByRef Operation operation(); public native DepthwiseConv2dNative operation(Operation operation); public native @ByRef Output output(); public native DepthwiseConv2dNative output(Output output); } /** Computes the gradients of depthwise convolution with respect to the filter. * * Arguments: * * scope: A Scope object * * input: 4-D with shape based on {@code data_format}. For example, if * {@code data_format} is 'NHWC' then {@code input} is a 4-D {@code [batch, in_height, * in_width, in_channels]} tensor. * * filter_sizes: An integer vector representing the tensor shape of {@code filter}, * where {@code filter} is a 4-D * {@code [filter_height, filter_width, in_channels, depthwise_multiplier]} tensor. * * out_backprop: 4-D with shape based on {@code data_format}. * For example, if {@code data_format} is 'NHWC' then * out_backprop shape is {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * * strides: The stride of the sliding window for each dimension of the input * of the convolution. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. 
With the * default format "NHWC", the data is stored in the order of: * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, channels, height, width]. * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Returns: * * {@code Output}: 4-D with shape * {@code [filter_height, filter_width, in_channels, out_channels]}. Gradient w.r.t. * the {@code filter} input of the convolution. */ @Namespace("tensorflow::ops") @NoOffset public static class DepthwiseConv2dNativeBackpropFilter extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DepthwiseConv2dNativeBackpropFilter(Pointer p) { super(p); } /** Optional attribute setters for DepthwiseConv2dNativeBackpropFilter */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, channels, height, width]. * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void 
allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter_sizes, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter_sizes, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... 
x); public native @ByRef Operation operation(); public native DepthwiseConv2dNativeBackpropFilter operation(Operation operation); public native @ByRef Output output(); public native DepthwiseConv2dNativeBackpropFilter output(Output output); } /** Computes the gradients of depthwise convolution with respect to the input. * * Arguments: * * scope: A Scope object * * input_sizes: An integer vector representing the shape of {@code input}, based * on {@code data_format}. For example, if {@code data_format} is 'NHWC' then * {@code input} is a 4-D {@code [batch, height, width, channels]} tensor. * * filter: 4-D with shape * {@code [filter_height, filter_width, in_channels, depthwise_multiplier]}. * * out_backprop: 4-D with shape based on {@code data_format}. * For example, if {@code data_format} is 'NHWC' then * out_backprop shape is {@code [batch, out_height, out_width, out_channels]}. * Gradients w.r.t. the output of the convolution. * * strides: The stride of the sliding window for each dimension of the input * of the convolution. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, channels, height, width]. * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Returns: * * {@code Output}: 4-D with shape according to {@code data_format}. For example, if * {@code data_format} is 'NHWC', output shape is {@code [batch, in_height, * in_width, in_channels]}. Gradient w.r.t. the input of the * convolution. */ @Namespace("tensorflow::ops") @NoOffset public static class DepthwiseConv2dNativeBackpropInput extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DepthwiseConv2dNativeBackpropInput(Pointer p) { super(p); } /** Optional attribute setters for DepthwiseConv2dNativeBackpropInput */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, channels, height, width]. * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. 
If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of * {@code data_format}, see above for details. Dilations in the batch and depth * dimensions must be 1. * * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal 
Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public DepthwiseConv2dNativeBackpropInput(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_sizes, filter, out_backprop, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_sizes, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator 
tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... x); public native @ByRef Operation operation(); public native DepthwiseConv2dNativeBackpropInput operation(Operation operation); public native @ByRef Output output(); public native DepthwiseConv2dNativeBackpropInput output(Output output); } /** Computes the grayscale dilation of 4-D {@code input} and 3-D {@code filter} tensors. * * The {@code input} tensor has shape {@code [batch, in_height, in_width, depth]} and the * {@code filter} tensor has shape {@code [filter_height, filter_width, depth]}, i.e., each * input channel is processed independently of the others with its own structuring * function. The {@code output} tensor has shape * {@code [batch, out_height, out_width, depth]}. The spatial dimensions of the output * tensor depend on the {@code padding} algorithm. We currently only support the default * "NHWC" {@code data_format}. * * In detail, the grayscale morphological 2-D dilation is the max-sum correlation * (for consistency with {@code conv2d}, we use unmirrored filters): * * output[b, y, x, c] = * max_{dy, dx} input[b, * strides[1] * y + rates[1] * dy, * strides[2] * x + rates[2] * dx, * c] + * filter[dy, dx, c] * * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. * * Note on duality: The dilation of {@code input} by the {@code filter} is equal to the * negation of the erosion of {@code -input} by the reflected {@code filter}. * * Arguments: * * scope: A Scope object * * input: 4-D with shape {@code [batch, in_height, in_width, depth]}. * * filter: 3-D with shape {@code [filter_height, filter_width, depth]}. * * strides: The stride of the sliding window for each dimension of the input * tensor. Must be: {@code [1, stride_height, stride_width, 1]}. * * rates: The input stride for atrous morphological dilation. Must be: * {@code [1, rate_height, rate_width, 1]}. * * padding: The type of padding algorithm to use. * * Returns: * * {@code Output}: 4-D with shape {@code [batch, out_height, out_width, depth]}. */ @Namespace("tensorflow::ops") @NoOffset public static class Dilation2D extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Dilation2D(Pointer p) { super(p); } public Dilation2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding); public Dilation2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding); public Dilation2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding); public Dilation2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding); public Dilation2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding); public Dilation2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Dilation2D operation(Operation operation); public native @ByRef Output output(); public native Dilation2D output(Output output); } /** Computes the gradient of morphological 2-D dilation with respect to the filter. * * Arguments: * * scope: A Scope object * * input: 4-D with shape {@code [batch, in_height, in_width, depth]}. * * filter: 3-D with shape {@code [filter_height, filter_width, depth]}. * * out_backprop: 4-D with shape {@code [batch, out_height, out_width, depth]}. * * strides: 1-D of length 4. The stride of the sliding window for each dimension of * the input tensor. 
Must be: {@code [1, stride_height, stride_width, 1]}. * * rates: 1-D of length 4. The input stride for atrous morphological dilation. * Must be: {@code [1, rate_height, rate_width, 1]}. * * padding: The type of padding algorithm to use. * * Returns: * * {@code Output}: 3-D with shape {@code [filter_height, filter_width, depth]}. */ @Namespace("tensorflow::ops") @NoOffset public static class Dilation2DBackpropFilter extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dilation2DBackpropFilter(Pointer p) { super(p); } public Dilation2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding); public Dilation2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding); public Dilation2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding); public Dilation2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding); public Dilation2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding); public Dilation2DBackpropFilter(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); 
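        // Note (editor's comment, not generated): each public constructor overload in
        // this class forwards to a matching private native allocate(...) overload,
        // which builds the underlying C++ tensorflow::ops::Dilation2DBackpropFilter
        // node for the given strides, rates, and padding.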
} private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Dilation2DBackpropFilter operation(Operation operation); public native @ByRef Output filter_backprop(); public native Dilation2DBackpropFilter filter_backprop(Output filter_backprop); } /** Computes the gradient of morphological 2-D dilation with respect to the input. * * Arguments: * * scope: A Scope object * * input: 4-D with shape {@code [batch, in_height, in_width, depth]}. * * filter: 3-D with shape {@code [filter_height, filter_width, depth]}. * * out_backprop: 4-D with shape {@code [batch, out_height, out_width, depth]}. * * strides: 1-D of length 4. The stride of the sliding window for each dimension of * the input tensor. Must be: {@code [1, stride_height, stride_width, 1]}. * * rates: 1-D of length 4. The input stride for atrous morphological dilation. * Must be: {@code [1, rate_height, rate_width, 1]}. * * padding: The type of padding algorithm to use. * * Returns: * * {@code Output}: 4-D with shape {@code [batch, in_height, in_width, depth]}. */ @Namespace("tensorflow::ops") @NoOffset public static class Dilation2DBackpropInput extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dilation2DBackpropInput(Pointer p) { super(p); } public Dilation2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece BytePointer padding); public Dilation2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece String padding); public Dilation2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece BytePointer padding); public Dilation2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, 
strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntPointer strides, @ArraySlice IntPointer rates, @StringPiece String padding); public Dilation2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice IntBuffer strides, @ArraySlice IntBuffer rates, @StringPiece BytePointer padding); public Dilation2DBackpropInput(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, out_backprop, strides, rates, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input out_backprop, @ArraySlice int[] strides, @ArraySlice int[] rates, @StringPiece String padding); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Dilation2DBackpropInput operation(Operation operation); public native @ByRef Output in_backprop(); public native Dilation2DBackpropInput in_backprop(Output in_backprop); } /** Computes the exponential linear activation: {@code exp(features) - 1} if {@code features} < 0, {@code features} otherwise. * * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * ](http://arxiv.org/abs/1511.07289) * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The activations tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Elu extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Elu(Pointer p) { super(p); } public Elu(@Const @ByRef Scope scope, @ByVal Input features) { super((Pointer)null); allocate(scope, features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Elu operation(Operation operation); public native @ByRef Output activations(); public native Elu activations(Output activations); } /** Performs fractional average pooling on the input. * * Fractional average pooling is similar to fractional max pooling in the pooling * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. * * Arguments: * * scope: A Scope object * * value: 4-D with shape {@code [batch, height, width, channels]}. * * pooling_ratio: Pooling ratio for each dimension of {@code value}; currently only * the row and col dimensions are supported, and each element should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions.
1.44 and 1.73 are the pooling ratios on the height and width dimensions, * respectively. * * Optional attributes (see {@code Attrs}): * * pseudo_random: When set to True, generates the pooling sequence in a * pseudorandom fashion, otherwise, in a random fashion. See the paper [Benjamin * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the * difference between pseudorandom and random. * * overlapping: When set to True, the values at the boundary * of adjacent pooling cells are used by both cells when pooling. For example: * * {@code index 0 1 2 3 4} * * {@code value 20 5 16 3 7} * * If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice. * The result would be [41/3, 26/3] for fractional avg pooling. * * deterministic: When set to True, a fixed pooling region will be used when * iterating over a FractionalAvgPool node in the computation graph. Mainly used * in unit tests to make FractionalAvgPool deterministic. * * seed: If either seed or seed2 is set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output} output: output tensor after fractional avg pooling. * * {@code Output} row_pooling_sequence: row pooling sequence, needed to calculate gradient. * * {@code Output} col_pooling_sequence: column pooling sequence, needed to calculate gradient. */ @Namespace("tensorflow::ops") @NoOffset public static class FractionalAvgPool extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FractionalAvgPool(Pointer p) { super(p); } /** Optional attribute setters for FractionalAvgPool */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** When set to True, generates the pooling sequence in a * pseudorandom fashion, otherwise, in a random fashion. See the paper [Benjamin * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the * difference between pseudorandom and random. * * Defaults to false */ /// /// /// /// public native @ByVal Attrs PseudoRandom(@Cast("bool") boolean x); /** When set to True, the values at the boundary * of adjacent pooling cells are used by both cells when pooling. For example: * * {@code index 0 1 2 3 4} * * {@code value 20 5 16 3 7} * * If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice. * The result would be [41/3, 26/3] for fractional avg pooling. * * Defaults to false */ /// public native @ByVal Attrs Overlapping(@Cast("bool") boolean x); /** When set to True, a fixed pooling region will be used when * iterating over a FractionalAvgPool node in the computation graph. Mainly used * in unit tests to make FractionalAvgPool deterministic. * * Defaults to false */ /// public native @ByVal Attrs Deterministic(@Cast("bool") boolean x);
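    // Editor's illustrative sketch, not generated by JavaCPP: typical use of the
    // enclosing FractionalAvgPool wrapper together with these Attrs setters. `scope`
    // and `value` are assumed to be supplied by the caller; the pooling ratios come
    // from the Javadoc above, while the Overlapping/Seed settings are arbitrary
    // example choices.
    public static Output exampleFractionalAvgPool(Scope scope, Input value) {
        FractionalAvgPool pool = new FractionalAvgPool(scope, value,
                new float[] {1.0f, 1.44f, 1.73f, 1.0f},
                FractionalAvgPool.Overlapping(true).Seed(7));
        return pool.output(); // row/col pooling sequences are exposed separately
    }
    /** If either seed or seed2 is set to be non-zero, the random number * generator is seeded by the given seed.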
Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("bool") boolean pseudo_random_(); public native Attrs pseudo_random_(boolean pseudo_random_); public native @Cast("bool") boolean overlapping_(); public native Attrs overlapping_(boolean overlapping_); public native @Cast("bool") boolean deterministic_(); public native Attrs deterministic_(boolean deterministic_); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public FractionalAvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio) { super((Pointer)null); allocate(scope, value, pooling_ratio); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio); public FractionalAvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio) { super((Pointer)null); allocate(scope, value, pooling_ratio); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio); public FractionalAvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float... pooling_ratio) { super((Pointer)null); allocate(scope, value, pooling_ratio); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float... pooling_ratio); public FractionalAvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, pooling_ratio, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio, @Const @ByRef Attrs attrs); public FractionalAvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, pooling_ratio, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio, @Const @ByRef Attrs attrs); public FractionalAvgPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float[] pooling_ratio, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, pooling_ratio, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float[] pooling_ratio, @Const @ByRef Attrs attrs); public static native @ByVal Attrs PseudoRandom(@Cast("bool") boolean x); public static native @ByVal Attrs Overlapping(@Cast("bool") boolean x); public static native @ByVal Attrs Deterministic(@Cast("bool") boolean x); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native FractionalAvgPool operation(Operation operation); public native @ByRef Output output(); public native FractionalAvgPool output(Output output); public native @ByRef Output row_pooling_sequence(); public native FractionalAvgPool row_pooling_sequence(Output row_pooling_sequence); public native @ByRef Output col_pooling_sequence(); public native FractionalAvgPool col_pooling_sequence(Output
col_pooling_sequence); } /** Performs fractional max pooling on the input. * * Fractional max pooling is slightly different from regular max pooling. In * regular max pooling, you downsize an input set by taking the maximum value of * smaller N x N subsections of the set (often 2x2), and try to reduce the set by * a factor of N, where N is an integer. Fractional max pooling, as you might * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. * * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. * * First we define the following: * * 1. input_row_length : the number of rows in the input set * 2. output_row_length : the number of output rows, which will be smaller than the input * 3. alpha = input_row_length / output_row_length : our reduction ratio * 4. K = floor(alpha) * 5. row_pooling_sequence : this is the result list of pool boundary rows * * Then, row_pooling_sequence should satisfy: * * 1. a[0] = 0 : the first value of the sequence is 0 * 2. a[end] = input_row_length : the last value of the sequence is the size * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals have size K or K+1 * 4. length(row_pooling_sequence) = output_row_length+1 * * For more details on fractional max pooling, see this paper: * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) * * Arguments: * * scope: A Scope object * * value: 4-D with shape {@code [batch, height, width, channels]}. * * pooling_ratio: Pooling ratio for each dimension of {@code value}; currently only * the row and col dimensions are supported, and each element should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are the pooling ratios on the height and width dimensions, * respectively. * * Optional attributes (see {@code Attrs}): * * pseudo_random: When set to True, generates the pooling sequence in a * pseudorandom fashion, otherwise, in a random fashion. See the paper [Benjamin * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the * difference between pseudorandom and random. * * overlapping: When set to True, the values at the boundary * of adjacent pooling cells are used by both cells when pooling. For example: * * {@code index 0 1 2 3 4} * * {@code value 20 5 16 3 7} * * If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice. * The result would be [20, 16] for fractional max pooling. * * deterministic: When set to True, a fixed pooling region will be used when * iterating over a FractionalMaxPool node in the computation graph. Mainly used * in unit tests to make FractionalMaxPool deterministic. * * seed: If either seed or seed2 is set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output} output: output tensor after fractional max pooling. * * {@code Output} row_pooling_sequence: row pooling sequence, needed to calculate gradient. * * {@code Output} col_pooling_sequence: column pooling sequence, needed to calculate gradient. */ @Namespace("tensorflow::ops") @NoOffset public static class FractionalMaxPool extends Pointer { static { Loader.load(); }
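    // Editor's illustrative sketch, not generated by JavaCPP: fractional max pooling
    // with the example ratios from the Javadoc above. `scope` and `value` are assumed
    // to be supplied by the caller; pseudo-random sequence generation is an arbitrary
    // example choice.
    public static Output exampleFractionalMaxPool(Scope scope, Input value) {
        FractionalMaxPool pool = new FractionalMaxPool(scope, value,
                new float[] {1.0f, 1.44f, 1.73f, 1.0f},
                FractionalMaxPool.PseudoRandom(true));
        return pool.output(); // row/col pooling sequences are exposed separately
    }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.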
*/ public FractionalMaxPool(Pointer p) { super(p); } /** Optional attribute setters for FractionalMaxPool */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** When set to True, generates the pooling sequence in a * pseudorandom fashion, otherwise, in a random fashion. See the paper [Benjamin * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the * difference between pseudorandom and random. * * Defaults to false */ /// /// /// /// public native @ByVal Attrs PseudoRandom(@Cast("bool") boolean x); /** When set to True, the values at the boundary * of adjacent pooling cells are used by both cells when pooling. For example: * * {@code index 0 1 2 3 4} * * {@code value 20 5 16 3 7} * * If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice. * The result would be [20, 16] for fractional max pooling. * * Defaults to false */ /// public native @ByVal Attrs Overlapping(@Cast("bool") boolean x); /** When set to True, a fixed pooling region will be used when * iterating over a FractionalMaxPool node in the computation graph. Mainly used * in unit tests to make FractionalMaxPool deterministic. * * Defaults to false */ /// public native @ByVal Attrs Deterministic(@Cast("bool") boolean x); /** If either seed or seed2 is set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("bool") boolean pseudo_random_(); public native Attrs pseudo_random_(boolean pseudo_random_); public native @Cast("bool") boolean overlapping_(); public native Attrs overlapping_(boolean overlapping_); public native @Cast("bool") boolean deterministic_(); public native Attrs deterministic_(boolean deterministic_); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public FractionalMaxPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio) { super((Pointer)null); allocate(scope, value, pooling_ratio); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio); public FractionalMaxPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio) { super((Pointer)null); allocate(scope, value, pooling_ratio); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio); public FractionalMaxPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float... pooling_ratio) { super((Pointer)null); allocate(scope, value, pooling_ratio); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float...
pooling_ratio); public FractionalMaxPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, pooling_ratio, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatPointer pooling_ratio, @Const @ByRef Attrs attrs); public FractionalMaxPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, pooling_ratio, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice FloatBuffer pooling_ratio, @Const @ByRef Attrs attrs); public FractionalMaxPool(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float[] pooling_ratio, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, pooling_ratio, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @ArraySlice float[] pooling_ratio, @Const @ByRef Attrs attrs); public static native @ByVal Attrs PseudoRandom(@Cast("bool") boolean x); public static native @ByVal Attrs Overlapping(@Cast("bool") boolean x); public static native @ByVal Attrs Deterministic(@Cast("bool") boolean x); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native FractionalMaxPool operation(Operation operation); public native @ByRef Output output(); public native FractionalMaxPool output(Output output); public native @ByRef Output row_pooling_sequence(); public native FractionalMaxPool row_pooling_sequence(Output row_pooling_sequence); public native @ByRef Output col_pooling_sequence(); public native FractionalMaxPool col_pooling_sequence(Output col_pooling_sequence); } /** Batch normalization. * * Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW". * The sizes of the 1D Tensors match the dimension C of the 4D Tensors. * * Arguments: * * scope: A Scope object * * x: A 4D Tensor for input data. * * scale: A 1D Tensor for scaling factor, to scale the normalized x. * * offset: A 1D Tensor for offset, to shift the normalized x. * * mean: A 1D Tensor for population mean. Used for inference only; * must be empty for training. * * variance: A 1D Tensor for population variance. Used for inference only; * must be empty for training. * * Optional attributes (see {@code Attrs}): * * epsilon: A small float number added to the variance of x. * * data_format: The data format for x and y. Either "NHWC" (default) or "NCHW". * * is_training: A bool value to indicate the operation is for training (default) * or inference. * * Returns: * * {@code Output} y: A 4D Tensor for output data. * * {@code Output} batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow * to compute the running mean. * * {@code Output} batch_variance: A 1D Tensor for the computed batch variance, to be used by * TensorFlow to compute the running variance. * * {@code Output} reserve_space_1: A 1D Tensor for the computed batch mean, to be reused * in the gradient computation. * * {@code Output} reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance * in the cuDNN case), to be reused in the gradient computation. */ @Namespace("tensorflow::ops") @NoOffset public static class FusedBatchNorm extends Pointer { static { Loader.load(); }
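    // Editor's illustrative sketch, not generated by JavaCPP: inference-mode batch
    // normalization. The five Input tensors are assumed to be provided by the caller;
    // the epsilon value shown is simply the documented default repeated for
    // illustration.
    public static Output exampleFusedBatchNormInference(Scope scope, Input x, Input scale, Input offset, Input mean, Input variance) {
        FusedBatchNorm fbn = new FusedBatchNorm(scope, x, scale, offset, mean, variance,
                FusedBatchNorm.Epsilon(0.0001f).IsTraining(false));
        return fbn.y(); // 4-D normalized output
    }
    /** Pointer cast constructor.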
Invokes {@link Pointer#Pointer(Pointer)}. */ public FusedBatchNorm(Pointer p) { super(p); } /** Optional attribute setters for FusedBatchNorm */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A small float number added to the variance of x. * * Defaults to 0.0001 */ /// public native @ByVal Attrs Epsilon(float x); /** The data format for x and y. Either "NHWC" (default) or "NCHW". * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** A bool value to indicate the operation is for training (default) * or inference. * * Defaults to true */ public native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native float epsilon_(); public native Attrs epsilon_(float epsilon_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @Cast("bool") boolean is_training_(); public native Attrs is_training_(boolean is_training_); } public FusedBatchNorm(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance) { super((Pointer)null); allocate(scope, x, scale, offset, mean, variance); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance); public FusedBatchNorm(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, scale, offset, mean, variance, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Epsilon(float x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FusedBatchNorm operation(Operation operation); public native @ByRef Output y(); public native FusedBatchNorm y(Output y); public native @ByRef Output batch_mean(); public native FusedBatchNorm batch_mean(Output batch_mean); public native @ByRef Output batch_variance(); public native FusedBatchNorm batch_variance(Output batch_variance); public native @ByRef Output reserve_space_1(); public native FusedBatchNorm reserve_space_1(Output reserve_space_1); public native @ByRef Output reserve_space_2(); public native FusedBatchNorm reserve_space_2(Output reserve_space_2); }
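// Editor's illustrative sketch, not generated by JavaCPP: wiring the gradient op for
// batch normalization. The reserve_space inputs are assumed to come from a prior
// training-mode FusedBatchNorm, as described in the Javadoc below.
public static Output exampleFusedBatchNormGrad(Scope scope, Input yBackprop, Input x, Input scale, Input reserveSpace1, Input reserveSpace2) {
    FusedBatchNormGrad grad = new FusedBatchNormGrad(scope, yBackprop, x, scale,
            reserveSpace1, reserveSpace2);
    return grad.x_backprop(); // 4-D gradient with respect to x
}
/** Gradient for batch normalization. * * Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW". * The sizes of the 1D Tensors match the dimension C of the 4D Tensors.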
* * Arguments: * * scope: A Scope object * * y_backprop: A 4D Tensor for the gradient with respect to y. * * x: A 4D Tensor for input data. * * scale: A 1D Tensor for scaling factor, to scale the normalized x. * * reserve_space_1: When is_training is True, a 1D Tensor for the computed batch * mean to be reused in gradient computation. When is_training is * False, a 1D Tensor for the population mean to be reused in both * 1st and 2nd order gradient computation. * * reserve_space_2: When is_training is True, a 1D Tensor for the computed batch * variance (inverted variance in the cuDNN case) to be reused in * gradient computation. When is_training is False, a 1D Tensor * for the population variance to be reused in both 1st and 2nd * order gradient computation. * * Optional attributes (see {@code Attrs}): * * epsilon: A small float number added to the variance of x. * * data_format: The data format for y_backprop, x, x_backprop. * Either "NHWC" (default) or "NCHW". * * is_training: A bool value to indicate the operation is for training (default) * or inference. * * Returns: * * {@code Output} x_backprop: A 4D Tensor for the gradient with respect to x. * * {@code Output} scale_backprop: A 1D Tensor for the gradient with respect to scale. * * {@code Output} offset_backprop: A 1D Tensor for the gradient with respect to offset. * * {@code Output} reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. * * {@code Output} reserve_space_4: Unused placeholder to match the variance input * in FusedBatchNorm. */ @Namespace("tensorflow::ops") @NoOffset public static class FusedBatchNormGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FusedBatchNormGrad(Pointer p) { super(p); } /** Optional attribute setters for FusedBatchNormGrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A small float number added to the variance of x. * * Defaults to 0.0001 */ /// public native @ByVal Attrs Epsilon(float x); /** The data format for y_backprop, x, x_backprop. * Either "NHWC" (default) or "NCHW". * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** A bool value to indicate the operation is for training (default) * or inference. 
* * Defaults to true */ public native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native float epsilon_(); public native Attrs epsilon_(float epsilon_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @Cast("bool") boolean is_training_(); public native Attrs is_training_(boolean is_training_); } public FusedBatchNormGrad(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2) { super((Pointer)null); allocate(scope, y_backprop, x, scale, reserve_space_1, reserve_space_2); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2); public FusedBatchNormGrad(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, y_backprop, x, scale, reserve_space_1, reserve_space_2, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Epsilon(float x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FusedBatchNormGrad operation(Operation operation); public native @ByRef Output x_backprop(); public native FusedBatchNormGrad x_backprop(Output x_backprop); public native @ByRef Output scale_backprop(); public native FusedBatchNormGrad scale_backprop(Output scale_backprop); public native @ByRef Output offset_backprop(); public native FusedBatchNormGrad offset_backprop(Output offset_backprop); public native @ByRef Output reserve_space_3(); public native FusedBatchNormGrad reserve_space_3(Output reserve_space_3); public native @ByRef Output reserve_space_4(); public native FusedBatchNormGrad reserve_space_4(Output reserve_space_4); } /** Gradient for batch normalization. * * Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW". * The sizes of the 1D Tensors match the dimension C of the 4D Tensors. * * Arguments: * * scope: A Scope object * * y_backprop: A 4D Tensor for the gradient with respect to y. * * x: A 4D Tensor for input data. * * scale: A 1D Tensor for scaling factor, to scale the normalized x. * * reserve_space_1: When is_training is True, a 1D Tensor for the computed batch * mean to be reused in gradient computation. When is_training is * False, a 1D Tensor for the population mean to be reused in both * 1st and 2nd order gradient computation. * * reserve_space_2: When is_training is True, a 1D Tensor for the computed batch * variance (inverted variance in the cuDNN case) to be reused in * gradient computation. When is_training is False, a 1D Tensor * for the population variance to be reused in both 1st and 2nd * order gradient computation. * * Optional attributes (see {@code Attrs}): * * epsilon: A small float number added to the variance of x. * * data_format: The data format for y_backprop, x, x_backprop. * Either "NHWC" (default) or "NCHW".
* * is_training: A bool value to indicate the operation is for training (default) * or inference. * * Returns: * * {@code Output} x_backprop: A 4D Tensor for the gradient with respect to x. * * {@code Output} scale_backprop: A 1D Tensor for the gradient with respect to scale. * * {@code Output} offset_backprop: A 1D Tensor for the gradient with respect to offset. * * {@code Output} reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. * * {@code Output} reserve_space_4: Unused placeholder to match the variance input * in FusedBatchNorm. */ @Namespace("tensorflow::ops") @NoOffset public static class FusedBatchNormGradV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FusedBatchNormGradV2(Pointer p) { super(p); } /** Optional attribute setters for FusedBatchNormGradV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A small float number added to the variance of x. * * Defaults to 0.0001 */ /// public native @ByVal Attrs Epsilon(float x); /** The data format for y_backprop, x, x_backprop. * Either "NHWC" (default) or "NCHW". * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** A bool value to indicate the operation is for training (default) * or inference. 
* * Defaults to true */ public native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native float epsilon_(); public native Attrs epsilon_(float epsilon_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @Cast("bool") boolean is_training_(); public native Attrs is_training_(boolean is_training_); } public FusedBatchNormGradV2(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2) { super((Pointer)null); allocate(scope, y_backprop, x, scale, reserve_space_1, reserve_space_2); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2); public FusedBatchNormGradV2(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, y_backprop, x, scale, reserve_space_1, reserve_space_2, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input y_backprop, @ByVal Input x, @ByVal Input scale, @ByVal Input reserve_space_1, @ByVal Input reserve_space_2, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Epsilon(float x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FusedBatchNormGradV2 operation(Operation operation); public native @ByRef Output x_backprop(); public native FusedBatchNormGradV2 x_backprop(Output x_backprop); public native @ByRef Output scale_backprop(); public native FusedBatchNormGradV2 scale_backprop(Output scale_backprop); public native @ByRef Output offset_backprop(); public native FusedBatchNormGradV2 offset_backprop(Output offset_backprop); public native @ByRef Output reserve_space_3(); public native FusedBatchNormGradV2 reserve_space_3(Output reserve_space_3); public native @ByRef Output reserve_space_4(); public native FusedBatchNormGradV2 reserve_space_4(Output reserve_space_4); } /** Batch normalization. * * Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW". * The sizes of the 1D Tensors match the dimension C of the 4D Tensors. * * Arguments: * * scope: A Scope object * * x: A 4D Tensor for input data. * * scale: A 1D Tensor for scaling factor, to scale the normalized x. * * offset: A 1D Tensor for offset, to shift the normalized x. * * mean: A 1D Tensor for population mean. Used for inference only; * must be empty for training. * * variance: A 1D Tensor for population variance. Used for inference only; * must be empty for training. * * Optional attributes (see {@code Attrs}): * * epsilon: A small float number added to the variance of x. * * data_format: The data format for x and y. Either "NHWC" (default) or "NCHW". * * is_training: A bool value to indicate the operation is for training (default) * or inference. * * Returns: * * {@code Output} y: A 4D Tensor for output data. * * {@code Output} batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow * to compute the running mean. * * {@code Output} batch_variance: A 1D Tensor for the computed batch variance, to be used by * TensorFlow to compute the running variance.
* * {@code Output} reserve_space_1: A 1D Tensor for the computed batch mean, to be reused * in the gradient computation. * * {@code Output} reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance * in the cuDNN case), to be reused in the gradient computation. */ @Namespace("tensorflow::ops") @NoOffset public static class FusedBatchNormV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FusedBatchNormV2(Pointer p) { super(p); } /** Optional attribute setters for FusedBatchNormV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A small float number added to the variance of x. * * Defaults to 0.0001 */ /// public native @ByVal Attrs Epsilon(float x); /** The data format for x and y. Either "NHWC" (default) or "NCHW". * * Defaults to "NHWC" */ /// public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); /** A bool value to indicate the operation is for training (default) * or inference. * * Defaults to true */ public native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native float epsilon_(); public native Attrs epsilon_(float epsilon_); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); public native @Cast("bool") boolean is_training_(); public native Attrs is_training_(boolean is_training_); } public FusedBatchNormV2(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance) { super((Pointer)null); allocate(scope, x, scale, offset, mean, variance); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance); public FusedBatchNormV2(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, x, scale, offset, mean, variance, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input x, @ByVal Input scale, @ByVal Input offset, @ByVal Input mean, @ByVal Input variance, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Epsilon(float x); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public static native @ByVal Attrs IsTraining(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FusedBatchNormV2 operation(Operation operation); public native @ByRef Output y(); public native FusedBatchNormV2 y(Output y); public native @ByRef Output batch_mean(); public native FusedBatchNormV2 batch_mean(Output batch_mean); public native @ByRef Output batch_variance(); public native FusedBatchNormV2 batch_variance(Output batch_variance); public native @ByRef Output reserve_space_1(); public 
native FusedBatchNormV2 reserve_space_1(Output reserve_space_1);
    public native @ByRef Output reserve_space_2(); public native FusedBatchNormV2 reserve_space_2(Output reserve_space_2);
}

/** Performs padding as a preprocess during a convolution.
 *
 *  Similar to FusedResizeAndPadConv2D, this op allows for an optimized
 *  implementation where the spatial padding transformation stage is fused with the
 *  im2col lookup, but in this case without the bilinear filtering required for
 *  resizing. Fusing the padding prevents the need to write out the intermediate
 *  results as whole tensors, reducing memory pressure, and we can get some latency
 *  gains by merging the transformation calculations.
 *  The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
 *  order is used instead.
 *  Internally this op uses a single per-graph scratch buffer, which means that it
 *  will block if multiple versions are being run in parallel. This is because this
 *  operator is primarily an optimization to minimize memory usage.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 4-D with shape {@code [batch, in_height, in_width, in_channels]}.
 *  * paddings: A two-column matrix specifying the padding sizes. The number of
 *  rows must be the same as the rank of {@code input}.
 *  * filter: 4-D with shape
 *  {@code [filter_height, filter_width, in_channels, out_channels]}.
 *  * mode: The padding mode to apply, either "REFLECT" or "SYMMETRIC".
 *  * strides: 1-D of length 4. The stride of the sliding window for each dimension
 *  of {@code input}. Must be in the same order as the dimension specified with format.
 *  * padding: The type of padding algorithm to use.
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class FusedPadConv2D extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public FusedPadConv2D(Pointer p) { super(p); } public FusedPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public FusedPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntBuffer strides, @StringPiece String padding); public FusedPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice int[] strides, @StringPiece BytePointer padding); public FusedPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntPointer strides, @StringPiece String padding); public FusedPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public FusedPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice int[] strides, @StringPiece String padding); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native FusedPadConv2D operation(Operation operation); public native @ByRef Output output(); public native FusedPadConv2D output(Output output); } /** Performs a resize and padding as a preprocess during a convolution. 
 *
 *  It's often possible to do spatial transformations more efficiently as part of
 *  the packing stage of a convolution, so this op allows for an optimized
 *  implementation where these stages are fused together. This prevents the need to
 *  write out the intermediate results as whole tensors, reducing memory pressure,
 *  and we can get some latency gains by merging the transformation calculations.
 *  The data_format attribute for Conv2D isn't supported by this op, and defaults to
 *  'NHWC' order.
 *  Internally this op uses a single per-graph scratch buffer, which means that it
 *  will block if multiple versions are being run in parallel. This is because this
 *  operator is primarily an optimization to minimize memory usage.
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 4-D with shape {@code [batch, in_height, in_width, in_channels]}.
 *  * size: A 1-D int32 Tensor of 2 elements: {@code new_height, new_width}. The
 *  new size for the images.
 *  * paddings: A two-column matrix specifying the padding sizes. The number of
 *  rows must be the same as the rank of {@code input}.
 *  * filter: 4-D with shape
 *  {@code [filter_height, filter_width, in_channels, out_channels]}.
 *  * mode: The padding mode to apply, either "REFLECT" or "SYMMETRIC".
 *  * strides: 1-D of length 4. The stride of the sliding window for each dimension
 *  of {@code input}. Must be in the same order as the dimension specified with format.
 *  * padding: The type of padding algorithm to use.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * resize_align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are
 *  aligned, preserving the values at the corner pixels. Defaults to false.
 *
 *  Returns:
 *  * {@code Output}: The output tensor. */
@Namespace("tensorflow::ops") @NoOffset public static class FusedResizeAndPadConv2D extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public FusedResizeAndPadConv2D(Pointer p) { super(p); }

    /** Optional attribute setters for FusedResizeAndPadConv2D */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** If true, the centers of the 4 corner pixels of the input and output tensors are
         *  aligned, preserving the values at the corner pixels. Defaults to false.
* * Defaults to false */ public native @ByVal Attrs ResizeAlignCorners(@Cast("bool") boolean x); public native @Cast("bool") boolean resize_align_corners_(); public native Attrs resize_align_corners_(boolean resize_align_corners_); } public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntBuffer strides, @StringPiece String padding); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice int[] strides, @StringPiece BytePointer padding); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntPointer strides, @StringPiece String padding); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice int[] 
strides, @StringPiece String padding); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece BytePointer mode, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public FusedResizeAndPadConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, size, paddings, filter, mode, strides, padding, attrs); } private native void allocate(@Const @ByRef 
Scope scope, @ByVal Input input, @ByVal Input size, @ByVal Input paddings, @ByVal Input filter, @StringPiece String mode, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs ResizeAlignCorners(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native FusedResizeAndPadConv2D operation(Operation operation); public native @ByRef Output output(); public native FusedResizeAndPadConv2D output(Output output); } /** Says whether the targets are in the top {@code K} predictions. * * This outputs a {@code batch_size} bool array, an entry {@code out[i]} is {@code true} if the * prediction for the target class is among the top {@code k} predictions among * all predictions for example {@code i}. Note that the behavior of {@code InTopK} differs * from the {@code TopK} op in its handling of ties; if multiple classes have the * same prediction value and straddle the top-{@code k} boundary, all of those * classes are considered to be in the top {@code k}. * * More formally, let * * \\(predictions_i\\) be the predictions for all classes for example {@code i}, * \\(targets_i\\) be the target class for example {@code i}, * \\(out_i\\) be the output for example {@code i}, * * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ * * Arguments: * * scope: A Scope object * * predictions: A {@code batch_size} x {@code classes} tensor. * * targets: A {@code batch_size} vector of class ids. * * k: Number of top elements to look at for computing precision. * * Returns: * * {@code Output}: Computed Precision at {@code k} as a {@code bool Tensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class InTopK extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InTopK(Pointer p) { super(p); } public InTopK(@Const @ByRef Scope scope, @ByVal Input predictions, @ByVal Input targets, @Cast("tensorflow::int64") long k) { super((Pointer)null); allocate(scope, predictions, targets, k); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input predictions, @ByVal Input targets, @Cast("tensorflow::int64") long k); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native InTopK operation(Operation operation); public native @ByRef Output precision(); public native InTopK precision(Output precision); } /** Says whether the targets are in the top {@code K} predictions. * * This outputs a {@code batch_size} bool array, an entry {@code out[i]} is {@code true} if the * prediction for the target class is among the top {@code k} predictions among * all predictions for example {@code i}. Note that the behavior of {@code InTopK} differs * from the {@code TopK} op in its handling of ties; if multiple classes have the * same prediction value and straddle the top-{@code k} boundary, all of those * classes are considered to be in the top {@code k}. 
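 *
 *  <p>Editor's worked illustration (values assumed, not from the generated docs):
 *  with {@code k = 2}, {@code predictions = [[0.1f, 0.3f, 0.2f, 0.4f]]} and
 *  {@code targets = [3]}, the top-2 classes for the single example are 3 and 1,
 *  so the op yields {@code out = [true]}.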
 *
 *  More formally, let
 *
 *  \\(predictions_i\\) be the predictions for all classes for example {@code i},
 *  \\(targets_i\\) be the target class for example {@code i},
 *  \\(out_i\\) be the output for example {@code i},
 *
 *  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * predictions: A {@code batch_size} x {@code classes} tensor.
 *  * targets: A {@code batch_size} vector of class ids.
 *  * k: Number of top elements to look at for computing precision.
 *
 *  Returns:
 *  * {@code Output}: Computed precision at {@code k} as a {@code bool Tensor}. */
@Namespace("tensorflow::ops") @NoOffset public static class InTopKV2 extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public InTopKV2(Pointer p) { super(p); }

    public InTopKV2(@Const @ByRef Scope scope, @ByVal Input predictions, @ByVal Input targets,
                    @ByVal Input k) { super((Pointer)null); allocate(scope, predictions, targets, k); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input predictions, @ByVal Input targets,
                                 @ByVal Input k);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native InTopKV2 operation(Operation operation);
    public native @ByRef Output precision(); public native InTopKV2 precision(Output precision);
}

/** L2 Loss.
 *
 *  Computes half the L2 norm of a tensor without the {@code sqrt}:
 *
 *      output = sum(t ** 2) / 2
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * t: Typically 2-D, but may have any dimensions.
 *
 *  Returns:
 *  * {@code Output}: 0-D. */
@Namespace("tensorflow::ops") @NoOffset public static class L2Loss extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public L2Loss(Pointer p) { super(p); }

    public L2Loss(@Const @ByRef Scope scope, @ByVal Input t) { super((Pointer)null); allocate(scope, t); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input t);
    public native @ByVal @Name("operator tensorflow::Output") Output asOutput();
    public native @ByVal @Name("operator tensorflow::Input") Input asInput();
    public native Node node();

    public native @ByRef Operation operation(); public native L2Loss operation(Operation operation);
    public native @ByRef Output output(); public native L2Loss output(Output output);
}

/** Local Response Normalization.
 *
 *  The 4-D {@code input} tensor is treated as a 3-D array of 1-D vectors (along the last
 *  dimension), and each vector is normalized independently. Within a given vector,
 *  each component is divided by the weighted, squared sum of inputs within
 *  {@code depth_radius}. In detail,
 *
 *      sqr_sum[a, b, c, d] =
 *          sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
 *      output = input / (bias + alpha * sqr_sum) ** beta
 *
 *  For details, see [Krizhevsky et al., ImageNet classification with deep
 *  convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
 *
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 4-D.
 *
 *  Optional attributes (see {@code Attrs}):
 *  * depth_radius: 0-D. Half-width of the 1-D normalization window.
 *  * bias: An offset (usually positive to avoid dividing by 0).
 *  * alpha: A scale factor, usually positive.
 *  * beta: An exponent.
 *
 *  Returns:
 *  * {@code Output}: The output tensor.
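 *
 *  <p>Editor's usage sketch (not generated; the hyperparameter values are assumed
 *  for illustration): the static attribute setters chain into an {@code Attrs}
 *  value accepted by the attrs-taking constructor, e.g.
 *  <pre>{@code
 *  LRN lrn = new LRN(scope, images.asInput(),
 *                    LRN.DepthRadius(2).Bias(1.0f).Alpha(1e-4f).Beta(0.75f));
 *  Output normalized = lrn.output();
 *  }</pre>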
*/ @Namespace("tensorflow::ops") @NoOffset public static class LRN extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LRN(Pointer p) { super(p); } /** Optional attribute setters for LRN */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** 0-D. Half-width of the 1-D normalization window. * * Defaults to 5 */ /// public native @ByVal Attrs DepthRadius(@Cast("tensorflow::int64") long x); /** An offset (usually positive to avoid dividing by 0). * * Defaults to 1 */ /// public native @ByVal Attrs Bias(float x); /** A scale factor, usually positive. * * Defaults to 1 */ /// public native @ByVal Attrs Alpha(float x); /** An exponent. * * Defaults to 0.5 */ public native @ByVal Attrs Beta(float x); public native @Cast("tensorflow::int64") long depth_radius_(); public native Attrs depth_radius_(long depth_radius_); public native float bias_(); public native Attrs bias_(float bias_); public native float alpha_(); public native Attrs alpha_(float alpha_); public native float beta_(); public native Attrs beta_(float beta_); } public LRN(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public LRN(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DepthRadius(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Bias(float x); public static native @ByVal Attrs Alpha(float x); public static native @ByVal Attrs Beta(float x); public native @ByRef Operation operation(); public native LRN operation(Operation operation); public native @ByRef Output output(); public native LRN output(Output output); } /** Computes log softmax activations. * * For each batch {@code i} and class {@code j} we have * * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) * * Arguments: * * scope: A Scope object * * logits: 2-D with shape {@code [batch_size, num_classes]}. * * Returns: * * {@code Output}: Same shape as {@code logits}. */ @Namespace("tensorflow::ops") @NoOffset public static class LogSoftmax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LogSoftmax(Pointer p) { super(p); } public LogSoftmax(@Const @ByRef Scope scope, @ByVal Input logits) { super((Pointer)null); allocate(scope, logits); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input logits); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native LogSoftmax operation(Operation operation); public native @ByRef Output logsoftmax(); public native LogSoftmax logsoftmax(Output logsoftmax); } /** Performs max pooling on the input. * * Arguments: * * scope: A Scope object * * input: 4-D input to pool over. * * ksize: The size of the window for each dimension of the input tensor. * * strides: The stride of the sliding window for each dimension of the * input tensor. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Returns: * * {@code Output}: The max pooled output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPool extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool(Pointer p) { super(p); } /** Optional attribute setters for MaxPool */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. 
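 *
 *  <p>Editor's illustration (not generated; shapes assumed): the resulting
 *  {@code Attrs} value is passed to the attribute-taking constructors, e.g.
 *  <pre>{@code
 *  new MaxPool(scope, input, new int[] {1, 1, 2, 2}, new int[] {1, 1, 2, 2},
 *              "SAME", MaxPool.DataFormat("NCHW"));
 *  }</pre>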
* * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); 
public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPool operation(Operation operation); public native @ByRef Output output(); public native MaxPool output(Output output); } /** Performs 3D max pooling on the input. * * Arguments: * * scope: A Scope object * * input: Shape {@code [batch, depth, rows, cols, channels]} tensor to pool over. * * ksize: 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Returns: * * {@code Output}: The max pooled output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPool3D extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MaxPool3D(Pointer p) { super(p); } /** Optional attribute setters for MaxPool3D */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Defaults to "NDHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, 
@ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3D(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPool3D operation(Operation operation); public native @ByRef Output output(); public native MaxPool3D output(Output output); } /** Computes gradients of max pooling function. * * Arguments: * * scope: A Scope object * * orig_input: The original input tensor. * * orig_output: The original output tensor. 
* * grad: Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * * ksize: 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPool3DGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3DGrad(Pointer p) { super(p); } /** Optional attribute setters for MaxPool3DGrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. 
* * Defaults to "NDHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer 
ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3DGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static 
native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPool3DGrad operation(Operation operation); public native @ByRef Output output(); public native MaxPool3DGrad output(Output output); } /** Computes second-order gradients of the maxpooling function. * * Arguments: * * scope: A Scope object * * orig_input: The original input tensor. * * orig_output: The original output tensor. * * grad: Output backprop of shape {@code [batch, depth, rows, cols, channels]}. * * ksize: 1-D tensor of length 5. The size of the window for each dimension of * the input tensor. Must have {@code ksize[0] = ksize[4] = 1}. * * strides: 1-D tensor of length 5. The stride of the sliding window for each * dimension of {@code input}. Must have {@code strides[0] = strides[4] = 1}. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. * * Returns: * * {@code Output}: Gradients of gradients w.r.t. the input to {@code max_pool}. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPool3DGradGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3DGradGrad(Pointer p) { super(p); } /** Optional attribute setters for MaxPool3DGradGrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: * [batch, in_channels, in_depth, in_height, in_width]. 
* * Defaults to "NDHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input 
grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPool3DGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input 
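/* Usage sketch (illustrative, hedged): MaxPool3DGradGrad mirrors MaxPool3DGrad but computes
 * second-order gradients. Attrs can also be configured by chaining setters on a fresh
 * instance; scope, origInput, origOutput and grad are assumed names, as above:
 *
 *   MaxPool3DGradGrad.Attrs attrs = new MaxPool3DGradGrad.Attrs().DataFormat("NCDHW");
 *   MaxPool3DGradGrad gradGrad = new MaxPool3DGradGrad(scope, origInput, origOutput, grad,
 *       new int[] {1, 2, 2, 2, 1}, new int[] {1, 2, 2, 2, 1}, "SAME", attrs);
 *   Input chained = gradGrad.asInput();  // ops expose asInput()/asOutput() for graph wiring
 */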
asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPool3DGradGrad operation(Operation operation); public native @ByRef Output output(); public native MaxPool3DGradGrad output(Output output); } /** Computes second-order gradients of the maxpooling function. * * Arguments: * * scope: A Scope object * * orig_input: The original input tensor. * * orig_output: The original output tensor. * * grad: 4-D. Gradients of gradients w.r.t. the input of {@code max_pool}. * * ksize: The size of the window for each dimension of the input tensor. * * strides: The stride of the sliding window for each dimension of the * input tensor. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Returns: * * {@code Output}: Gradients of gradients w.r.t. the input to {@code max_pool}. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPoolGradGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPoolGradGrad(Pointer p) { super(p); } /** Optional attribute setters for MaxPoolGradGrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. 
* * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice 
IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolGradGrad(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node 
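/* Usage sketch (illustrative, hedged): the 2-D MaxPoolGradGrad takes length-4 ksize/strides;
 * the MaxPoolGradGradV2 class defined below is identical except that ksize and strides
 * arrive as runtime Input tensors instead of Java int arrays. Input names are assumptions:
 *
 *   MaxPoolGradGrad gg = new MaxPoolGradGrad(scope, origInput, origOutput, grad,
 *       new int[] {1, 2, 2, 1}, new int[] {1, 2, 2, 1}, "VALID");
 *   Output out = gg.output();  // gradients of gradients w.r.t. the max_pool input
 */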
node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPoolGradGrad operation(Operation operation); public native @ByRef Output output(); public native MaxPoolGradGrad output(Output output); } /** Computes second-order gradients of the maxpooling function. * * Arguments: * * scope: A Scope object * * orig_input: The original input tensor. * * orig_output: The original output tensor. * * grad: 4-D. Gradients of gradients w.r.t. the input of {@code max_pool}. * * ksize: The size of the window for each dimension of the input tensor. * * strides: The stride of the sliding window for each dimension of the * input tensor. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Returns: * * {@code Output}: Gradients of gradients w.r.t. the input to {@code max_pool}. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPoolGradGradV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPoolGradGradV2(Pointer p) { super(p); } /** Optional attribute setters for MaxPoolGradGradV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. 
* * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPoolGradGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding); public MaxPoolGradGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding); public MaxPoolGradGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolGradGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPoolGradGradV2 operation(Operation operation); public native @ByRef Output output(); public native MaxPoolGradGradV2 output(Output output); } /** Computes second-order gradients of the maxpooling function. * * Arguments: * * scope: A Scope object * * input: The original input. * * grad: 4-D with shape {@code [batch, height, width, channels]}. Gradients w.r.t. the * input of {@code max_pool}. * * argmax: The indices of the maximum values chosen for each output of {@code max_pool}. * * ksize: The size of the window for each dimension of the input tensor. * * strides: The stride of the sliding window for each dimension of the * input tensor. * * padding: The type of padding algorithm to use. * * Returns: * * {@code Output}: Gradients of gradients w.r.t. the input of {@code max_pool}. 
*/ @Namespace("tensorflow::ops") @NoOffset public static class MaxPoolGradGradWithArgmax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPoolGradGradWithArgmax(Pointer p) { super(p); } public MaxPoolGradGradWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, grad, argmax, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPoolGradGradWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, grad, argmax, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPoolGradGradWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, grad, argmax, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPoolGradGradWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, grad, argmax, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPoolGradGradWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, grad, argmax, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPoolGradGradWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, grad, argmax, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input grad, @ByVal Input argmax, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native 
MaxPoolGradGradWithArgmax operation(Operation operation); public native @ByRef Output output(); public native MaxPoolGradGradWithArgmax output(Output output); } /** Computes gradients of the maxpooling function. * * Arguments: * * scope: A Scope object * * orig_input: The original input tensor. * * orig_output: The original output tensor. * * grad: 4-D. Gradients w.r.t. the output of {@code max_pool}. * * ksize: The size of the window for each dimension of the input tensor. * * strides: The stride of the sliding window for each dimension of the * input tensor. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Returns: * * {@code Output}: Gradients w.r.t. the input to {@code max_pool}. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPoolGradV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPoolGradV2(Pointer p) { super(p); } /** Optional attribute setters for MaxPoolGradV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. 
* * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPoolGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding); public MaxPoolGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding); public MaxPoolGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolGradV2(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, orig_input, orig_output, grad, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input orig_input, @ByVal Input orig_output, @ByVal Input grad, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPoolGradV2 operation(Operation operation); public native @ByRef Output output(); public native MaxPoolGradV2 output(Output output); } /** Performs max pooling on the input. * * Arguments: * * scope: A Scope object * * input: 4-D input to pool over. * * ksize: The size of the window for each dimension of the input tensor. * * strides: The stride of the sliding window for each dimension of the * input tensor. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * data_format: Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. 
* * Returns: * * {@code Output}: The max pooled output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class MaxPoolV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPoolV2(Pointer p) { super(p); } /** Optional attribute setters for MaxPoolV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: * [batch, in_channels, in_height, in_width]. * * Defaults to "NHWC" */ public native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public native @ByVal Attrs DataFormat(@StringPiece String x); public native @StringPiece BytePointer data_format_(); public native Attrs data_format_(BytePointer data_format_); } public MaxPoolV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding); public MaxPoolV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding); public MaxPoolV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input ksize, @ByVal Input strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs DataFormat(@StringPiece BytePointer x); public static native @ByVal Attrs DataFormat(@StringPiece String x); public native @ByRef Operation operation(); public native MaxPoolV2 operation(Operation operation); public native @ByRef Output output(); public native MaxPoolV2 
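/* Usage sketch (illustrative, hedged): in the V2 ops the window and strides are Input
 * tensors, so they may be computed at graph-run time; ksizeT and stridesT are assumed
 * names for 1-D int32 Inputs built elsewhere in the graph:
 *
 *   MaxPoolV2 pool = new MaxPoolV2(scope, input, ksizeT, stridesT, "SAME",
 *       MaxPoolV2.DataFormat("NCHW"));  // optional; defaults to "NHWC"
 *   Output pooled = pool.output();
 *
 * MaxPoolGradV2 above follows the same pattern, with the additional orig_input,
 * orig_output and grad inputs.
 */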
output(Output output); }

/** Performs max pooling on the input and outputs both max values and indices.
 * 
 *  The indices in {@code argmax} are flattened, so that a maximum value at position
 *  {@code [b, y, x, c]} becomes flattened index
 *  {@code ((b * height + y) * width + x) * channels + c}. For example, with
 *  {@code height = width = channels = 2}, the maximum at position {@code [0, 1, 0, 1]}
 *  gets flattened index {@code ((0 * 2 + 1) * 2 + 0) * 2 + 1 = 5}.
 * 
 *  The indices returned are always in {@code [0, height) x [0, width)} before flattening,
 *  even if padding is involved and the mathematically correct answer is outside
 *  (either negative or too large). This is a bug, but fixing it is difficult to do
 *  in a safe backwards-compatible way, especially due to flattening.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 4-D with shape {@code [batch, height, width, channels]}. Input to pool over.
 *  * ksize: The size of the window for each dimension of the input tensor.
 *  * strides: The stride of the sliding window for each dimension of the
 *  input tensor.
 *  * padding: The type of padding algorithm to use.
 * 
 *  Returns:
 *  * {@code Output} output: The max pooled output tensor.
 *  * {@code Output} argmax: 4-D. The flattened indices of the max values chosen for each output. */
@Namespace("tensorflow::ops") @NoOffset public static class MaxPoolWithArgmax extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public MaxPoolWithArgmax(Pointer p) { super(p); }

    /** Optional attribute setters for MaxPoolWithArgmax */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_INT64 */ public native @ByVal Attrs Targmax(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int Targmax_(); public native Attrs Targmax_(int Targmax_); } public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, 
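/* Usage sketch (illustrative, hedged): MaxPoolWithArgmax exposes the pooled values and the
 * flattened indices as two separate outputs; the index dtype defaults to DT_INT64 and can
 * be switched through the Targmax attribute. The input name is an assumption:
 *
 *   MaxPoolWithArgmax mp = new MaxPoolWithArgmax(scope, input,
 *       new int[] {1, 2, 2, 1}, new int[] {1, 2, 2, 1}, "VALID",
 *       MaxPoolWithArgmax.Targmax(DT_INT32));
 *   Output pooled = mp.output();
 *   Output indices = mp.argmax();  // flattened as described in the class comment above
 */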
        strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs);
    public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs);
    public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs);
    public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs);
    public MaxPoolWithArgmax(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, ksize, strides, padding, attrs); }
    private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs);
    public static native @ByVal Attrs Targmax(@Cast("tensorflow::DataType") int x);
    public native @ByRef Operation operation(); public native MaxPoolWithArgmax operation(Operation operation);
    public native @ByRef Output output(); public native MaxPoolWithArgmax output(Output output);
    public native @ByRef Output argmax(); public native MaxPoolWithArgmax argmax(Output argmax);
}

/** Finds values of the {@code n}-th order statistic for the last dimension.
 * 
 *  If the input is a vector (rank-1), finds the entry which is the nth-smallest
 *  value in the vector and outputs its value as a scalar tensor.
 * 
 *  For matrices (resp. higher rank input), computes the entry which is the
 *  nth-smallest value in each row (resp. vector along the last dimension). Thus,
 * 
 *      values.shape = input.shape[:-1]
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * input: 1-D or higher with last dimension at least {@code n+1}.
 *  * n: 0-D. Position in the sorted vector to select along the last dimension (along
 *  each row for matrices). Valid range of n is {@code [0, input.shape[-1])}
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * reverse: When set to True, find the nth-largest value in the vector instead of the
 *  nth-smallest.
 * 
 *  Returns:
 *  * {@code Output}: The {@code n}-th order statistic along each last dimensional slice. */
@Namespace("tensorflow::ops") @NoOffset public static class NthElement extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public NthElement(Pointer p) { super(p); } /** Optional attribute setters for NthElement */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** When set to True, find the nth-largest value in the vector and vice * versa. * * Defaults to false */ public native @ByVal Attrs Reverse(@Cast("bool") boolean x); public native @Cast("bool") boolean reverse_(); public native Attrs reverse_(boolean reverse_); } public NthElement(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input n) { super((Pointer)null); allocate(scope, input, n); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input n); public NthElement(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input n, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, n, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input n, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Reverse(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native NthElement operation(Operation operation); public native @ByRef Output values(); public native NthElement values(Output values); } /** Produces the average pool of the input tensor for quantized types. * * Arguments: * * scope: A Scope object * * input: 4-D with shape {@code [batch, height, width, channels]}. * * min_input: The float value that the lowest quantized input value represents. * * max_input: The float value that the highest quantized input value represents. * * ksize: The size of the window for each dimension of the input tensor. * The length must be 4 to match the number of dimensions of the input. * * strides: The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * * padding: The type of padding algorithm to use. * * Returns: * * {@code Output} output * * {@code Output} min_output: The float value that the lowest quantized output value represents. * * {@code Output} max_output: The float value that the highest quantized output value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedAvgPool extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public QuantizedAvgPool(Pointer p) { super(p); } public QuantizedAvgPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public QuantizedAvgPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public QuantizedAvgPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public QuantizedAvgPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public QuantizedAvgPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public QuantizedAvgPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public native @ByRef Operation operation(); public native QuantizedAvgPool operation(Operation operation); public native @ByRef Output output(); public native QuantizedAvgPool output(Output output); public native @ByRef Output min_output(); public native QuantizedAvgPool min_output(Output min_output); public native @ByRef Output max_output(); public native QuantizedAvgPool 
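/* Usage sketch (illustrative, hedged): the quantized ops thread the real-valued range of
 * the quantized data through the graph; qInput is an assumed quantized 4-D Input, and
 * minIn/maxIn are assumed scalar float Inputs giving the real values represented by the
 * lowest and highest quantized input values:
 *
 *   QuantizedAvgPool qap = new QuantizedAvgPool(scope, qInput, minIn, maxIn,
 *       new int[] {1, 2, 2, 1}, new int[] {1, 2, 2, 1}, "VALID");
 *   Output qOut   = qap.output();
 *   Output minOut = qap.min_output();  // range represented by the quantized result
 *   Output maxOut = qap.max_output();
 */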
max_output(Output max_output); } /** Quantized Batch normalization. * * This op is deprecated and will be removed in the future. Prefer * {@code tf.nn.batch_normalization}. * * Arguments: * * scope: A Scope object * * t: A 4D input Tensor. * * t_min: The value represented by the lowest quantized input. * * t_max: The value represented by the highest quantized input. * * m: A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, * or a saved moving average thereof. * * m_min: The value represented by the lowest quantized mean. * * m_max: The value represented by the highest quantized mean. * * v: A 1D variance Tensor with size matching the last dimension of t. * This is the second output from tf.nn.moments, * or a saved moving average thereof. * * v_min: The value represented by the lowest quantized variance. * * v_max: The value represented by the highest quantized variance. * * beta: A 1D beta Tensor with size matching the last dimension of t. * An offset to be added to the normalized tensor. * * beta_min: The value represented by the lowest quantized offset. * * beta_max: The value represented by the highest quantized offset. * * gamma: A 1D gamma Tensor with size matching the last dimension of t. * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * * gamma_min: The value represented by the lowest quantized gamma. * * gamma_max: The value represented by the highest quantized gamma. * * variance_epsilon: A small float number to avoid dividing by 0. * * scale_after_normalization: A bool indicating whether the resulted tensor * needs to be multiplied with gamma. * * Returns: * * {@code Output} result * * {@code Output} result_min * * {@code Output} result_max */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedBatchNormWithGlobalNormalization extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public QuantizedBatchNormWithGlobalNormalization(Pointer p) { super(p); } public QuantizedBatchNormWithGlobalNormalization(@Const @ByRef Scope scope, @ByVal Input t, @ByVal Input t_min, @ByVal Input t_max, @ByVal Input m, @ByVal Input m_min, @ByVal Input m_max, @ByVal Input v, @ByVal Input v_min, @ByVal Input v_max, @ByVal Input beta, @ByVal Input beta_min, @ByVal Input beta_max, @ByVal Input gamma, @ByVal Input gamma_min, @ByVal Input gamma_max, @Cast("tensorflow::DataType") int out_type, float variance_epsilon, @Cast("bool") boolean scale_after_normalization) { super((Pointer)null); allocate(scope, t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input t, @ByVal Input t_min, @ByVal Input t_max, @ByVal Input m, @ByVal Input m_min, @ByVal Input m_max, @ByVal Input v, @ByVal Input v_min, @ByVal Input v_max, @ByVal Input beta, @ByVal Input beta_min, @ByVal Input beta_max, @ByVal Input gamma, @ByVal Input gamma_min, @ByVal Input gamma_max, @Cast("tensorflow::DataType") int out_type, float variance_epsilon, @Cast("bool") boolean scale_after_normalization); public native @ByRef Operation operation(); public native QuantizedBatchNormWithGlobalNormalization operation(Operation operation); public native @ByRef Output result(); public native QuantizedBatchNormWithGlobalNormalization result(Output result); public native @ByRef Output result_min(); public native QuantizedBatchNormWithGlobalNormalization result_min(Output result_min); public native @ByRef Output result_max(); public native QuantizedBatchNormWithGlobalNormalization result_max(Output result_max); } /** Adds Tensor 'bias' to Tensor 'input' for Quantized types. * * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * * Arguments: * * scope: A Scope object * * bias: A 1D bias Tensor with size matching the last dimension of 'input'. * * min_input: The float value that the lowest quantized input value represents. * * max_input: The float value that the highest quantized input value represents. * * min_bias: The float value that the lowest quantized bias value represents. * * max_bias: The float value that the highest quantized bias value represents. * * Returns: * * {@code Output} output * * {@code Output} min_out: The float value that the lowest quantized output value represents. * * {@code Output} max_out: The float value that the highest quantized output value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedBiasAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public QuantizedBiasAdd(Pointer p) { super(p); } public QuantizedBiasAdd(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input bias, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_bias, @ByVal Input max_bias, @Cast("tensorflow::DataType") int out_type) { super((Pointer)null); allocate(scope, input, bias, min_input, max_input, min_bias, max_bias, out_type); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input bias, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_bias, @ByVal Input max_bias, @Cast("tensorflow::DataType") int out_type); public native @ByRef Operation operation(); public native QuantizedBiasAdd operation(Operation operation); public native @ByRef Output output(); public native QuantizedBiasAdd output(Output output); public native @ByRef Output min_out(); public native QuantizedBiasAdd min_out(Output min_out); public native @ByRef Output max_out(); public native QuantizedBiasAdd max_out(Output max_out); } /** Computes a 2D convolution given quantized 4D input and filter tensors. * * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * * Arguments: * * scope: A Scope object * * filter: filter's input_depth dimension must match input's depth dimensions. * * min_input: The float value that the lowest quantized input value represents. * * max_input: The float value that the highest quantized input value represents. * * min_filter: The float value that the lowest quantized filter value represents. * * max_filter: The float value that the highest quantized filter value represents. * * strides: The stride of the sliding window for each dimension of the input * tensor. * * padding: The type of padding algorithm to use. * * Optional attributes (see {@code Attrs}): * * dilations: 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Returns: * * {@code Output} output * * {@code Output} min_output: The float value that the lowest quantized output value represents. * * {@code Output} max_output: The float value that the highest quantized output value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedConv2D extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedConv2D(Pointer p) { super(p); } /** Optional attribute setters for QuantizedConv2D */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QINT32 */ /// public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); /** 1-D tensor of length 4. The dilation factor for each dimension of * {@code input}. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the * value of {@code data_format}, see above for details. Dilations in the batch and * depth dimensions must be 1. * * Defaults to [1, 1, 1, 1] */ public native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public native @ByVal Attrs Dilations(@ArraySlice int... x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); public native @ArraySlice IntPointer dilations_(); public native Attrs dilations_(IntPointer dilations_); } public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece String padding); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece BytePointer padding); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, 
@ArraySlice IntPointer strides, @StringPiece String padding); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece String padding); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input 
min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntPointer strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding, @Const @ByRef Attrs attrs); public QuantizedConv2D(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, filter, min_input, max_input, min_filter, max_filter, strides, padding, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input filter, @ByVal Input min_input, @ByVal Input max_input, @ByVal Input min_filter, @ByVal Input max_filter, @ArraySlice int[] strides, @StringPiece String padding, @Const @ByRef Attrs attrs); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public static native @ByVal Attrs Dilations(@ArraySlice IntPointer x); public static native @ByVal Attrs Dilations(@ArraySlice IntBuffer x); public static native @ByVal Attrs Dilations(@ArraySlice int... x); public native @ByRef Operation operation(); public native QuantizedConv2D operation(Operation operation); public native @ByRef Output output(); public native QuantizedConv2D output(Output output); public native @ByRef Output min_output(); public native QuantizedConv2D min_output(Output min_output); public native @ByRef Output max_output(); public native QuantizedConv2D max_output(Output max_output); } /** Produces the max pool of the input tensor for quantized types. * * Arguments: * * scope: A Scope object * * input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * * min_input: The float value that the lowest quantized input value represents. * * max_input: The float value that the highest quantized input value represents. * * ksize: The size of the window for each dimension of the input tensor. * The length must be 4 to match the number of dimensions of the input. * * strides: The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * * padding: The type of padding algorithm to use. * * Returns: * * {@code Output} output * * {@code Output} min_output: The float value that the lowest quantized output value represents. * * {@code Output} max_output: The float value that the highest quantized output value represents. 
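*
* <p>A minimal sketch, assuming {@code import static org.bytedeco.javacpp.tensorflow.*},
* a quantized NHWC input placeholder, and illustrative 2x2 window and stride values:
* <pre>{@code
* Scope scope = Scope.NewRootScope();
* Placeholder input = new Placeholder(scope, DT_QUINT8);
* Placeholder minIn = new Placeholder(scope, DT_FLOAT);
* Placeholder maxIn = new Placeholder(scope, DT_FLOAT);
* QuantizedMaxPool pool = new QuantizedMaxPool(scope, input.asInput(),
*     minIn.asInput(), maxIn.asInput(),
*     new int[] {1, 2, 2, 1},  // ksize: one entry per input dimension
*     new int[] {1, 2, 2, 1},  // strides
*     "VALID");
* Output pooled = pool.output(); // quantized max pool result
* }</pre>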
*/ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedMaxPool extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedMaxPool(Pointer p) { super(p); } public QuantizedMaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece BytePointer padding); public QuantizedMaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece String padding); public QuantizedMaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece BytePointer padding); public QuantizedMaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntPointer ksize, @ArraySlice IntPointer strides, @StringPiece String padding); public QuantizedMaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice IntBuffer ksize, @ArraySlice IntBuffer strides, @StringPiece BytePointer padding); public QuantizedMaxPool(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding) { super((Pointer)null); allocate(scope, input, min_input, max_input, ksize, strides, padding); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input min_input, @ByVal Input max_input, @ArraySlice int[] ksize, @ArraySlice int[] strides, @StringPiece String padding); public native @ByRef Operation operation(); public native QuantizedMaxPool operation(Operation operation); public native @ByRef Output output(); public native QuantizedMaxPool 
output(Output output); public native @ByRef Output min_output(); public native QuantizedMaxPool min_output(Output min_output); public native @ByRef Output max_output(); public native QuantizedMaxPool max_output(Output max_output); } /** Computes Quantized Rectified Linear: {@code max(features, 0)} * * Arguments: * * scope: A Scope object * * min_features: The float value that the lowest quantized value represents. * * max_features: The float value that the highest quantized value represents. * * Returns: * * {@code Output} activations: Has the same output shape as "features". * * {@code Output} min_activations: The float value that the lowest quantized value represents. * * {@code Output} max_activations: The float value that the highest quantized value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedRelu extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedRelu(Pointer p) { super(p); } /** Optional attribute setters for QuantizedRelu */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QUINT8 */ public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); } public QuantizedRelu(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features) { super((Pointer)null); allocate(scope, features, min_features, max_features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features); public QuantizedRelu(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, features, min_features, max_features, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features, @Const @ByRef Attrs attrs); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native QuantizedRelu operation(Operation operation); public native @ByRef Output activations(); public native QuantizedRelu activations(Output activations); public native @ByRef Output min_activations(); public native QuantizedRelu min_activations(Output min_activations); public native @ByRef Output max_activations(); public native QuantizedRelu max_activations(Output max_activations); } /** Computes Quantized Rectified Linear 6: {@code min(max(features, 0), 6)} * * Arguments: * * scope: A Scope object * * min_features: The float value that the lowest quantized value represents. * * max_features: The float value that the highest quantized value represents. * * Returns: * * {@code Output} activations: Has the same output shape as "features". 
* * {@code Output} min_activations: The float value that the lowest quantized value represents. * * {@code Output} max_activations: The float value that the highest quantized value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedRelu6 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedRelu6(Pointer p) { super(p); } /** Optional attribute setters for QuantizedRelu6 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QUINT8 */ public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); } public QuantizedRelu6(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features) { super((Pointer)null); allocate(scope, features, min_features, max_features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features); public QuantizedRelu6(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, features, min_features, max_features, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input min_features, @ByVal Input max_features, @Const @ByRef Attrs attrs); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native QuantizedRelu6 operation(Operation operation); public native @ByRef Output activations(); public native QuantizedRelu6 activations(Output activations); public native @ByRef Output min_activations(); public native QuantizedRelu6 min_activations(Output min_activations); public native @ByRef Output max_activations(); public native QuantizedRelu6 max_activations(Output max_activations); } /** Computes Quantized Rectified Linear X: {@code min(max(features, 0), max_value)} * * Arguments: * * scope: A Scope object * * min_features: The float value that the lowest quantized value represents. * * max_features: The float value that the highest quantized value represents. * * Returns: * * {@code Output} activations: Has the same output shape as "features". * * {@code Output} min_activations: The float value that the lowest quantized value represents. * * {@code Output} max_activations: The float value that the highest quantized value represents. */ @Namespace("tensorflow::ops") @NoOffset public static class QuantizedReluX extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public QuantizedReluX(Pointer p) { super(p); } /** Optional attribute setters for QuantizedReluX */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. 
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to DT_QUINT8 */ public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); } public QuantizedReluX(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input max_value, @ByVal Input min_features, @ByVal Input max_features) { super((Pointer)null); allocate(scope, features, max_value, min_features, max_features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input max_value, @ByVal Input min_features, @ByVal Input max_features); public QuantizedReluX(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input max_value, @ByVal Input min_features, @ByVal Input max_features, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, features, max_value, min_features, max_features, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input max_value, @ByVal Input min_features, @ByVal Input max_features, @Const @ByRef Attrs attrs); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native QuantizedReluX operation(Operation operation); public native @ByRef Output activations(); public native QuantizedReluX activations(Output activations); public native @ByRef Output min_activations(); public native QuantizedReluX min_activations(Output min_activations); public native @ByRef Output max_activations(); public native QuantizedReluX max_activations(Output max_activations); } /** Computes rectified linear: {@code max(features, 0)}. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The activations tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Relu extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Relu(Pointer p) { super(p); } public Relu(@Const @ByRef Scope scope, @ByVal Input features) { super((Pointer)null); allocate(scope, features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Relu operation(Operation operation); public native @ByRef Output activations(); public native Relu activations(Output activations); } /** Computes rectified linear 6: {@code min(max(features, 0), 6)}. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The activations tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Relu6 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
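*
* <p>A one-line sketch for {@code Relu6}, assuming a float placeholder for the
* features:
* <pre>{@code
* Scope scope = Scope.NewRootScope();
* Placeholder features = new Placeholder(scope, DT_FLOAT);
* Output clipped = new Relu6(scope, features.asInput()).asOutput(); // min(max(features, 0), 6)
* }</pre>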
*/ public Relu6(Pointer p) { super(p); } public Relu6(@Const @ByRef Scope scope, @ByVal Input features) { super((Pointer)null); allocate(scope, features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Relu6 operation(Operation operation); public native @ByRef Output activations(); public native Relu6 activations(Output activations); } /** Computes scaled exponential linear: {@code scale * alpha * (exp(features) - 1)} if the features are less than 0, {@code scale * features} otherwise. * * To be used together with * {@code initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')}. * For correct dropout, use {@code tf.contrib.nn.alpha_dropout}. * * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515). * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The activations tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Selu extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Selu(Pointer p) { super(p); } public Selu(@Const @ByRef Scope scope, @ByVal Input features) { super((Pointer)null); allocate(scope, features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Selu operation(Operation operation); public native @ByRef Output activations(); public native Selu activations(Output activations); } /** Computes softmax activations. * * For each batch {@code i} and class {@code j} we have * * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ * * Arguments: * * scope: A Scope object * * logits: 2-D with shape {@code [batch_size, num_classes]}. * * Returns: * * {@code Output}: Same shape as {@code logits}. */ @Namespace("tensorflow::ops") @NoOffset public static class Softmax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Softmax(Pointer p) { super(p); } public Softmax(@Const @ByRef Scope scope, @ByVal Input logits) { super((Pointer)null); allocate(scope, logits); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input logits); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Softmax operation(Operation operation); public native @ByRef Output softmax(); public native Softmax softmax(Output softmax); } /** Computes softmax cross entropy cost and gradients to backpropagate. * * Inputs are the logits, not probabilities. * * Arguments: * * scope: A Scope object * * features: batch_size x num_classes matrix * * labels: batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. * * Returns: * * {@code Output} loss: Per example loss (batch_size vector). * * {@code Output} backprop: backpropagated gradients (batch_size x num_classes matrix). 
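*
* <p>A minimal sketch, assuming float placeholders for the logits and for one-hot
* (or otherwise valid probability) label rows:
* <pre>{@code
* Scope scope = Scope.NewRootScope();
* Placeholder logits = new Placeholder(scope, DT_FLOAT); // [batch_size, num_classes]
* Placeholder labels = new Placeholder(scope, DT_FLOAT); // each row a probability distribution
* SoftmaxCrossEntropyWithLogits xent =
*     new SoftmaxCrossEntropyWithLogits(scope, logits.asInput(), labels.asInput());
* Output loss     = xent.loss();     // per-example loss, shape [batch_size]
* Output backprop = xent.backprop(); // gradients w.r.t. the logits
* }</pre>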
*/ @Namespace("tensorflow::ops") @NoOffset public static class SoftmaxCrossEntropyWithLogits extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftmaxCrossEntropyWithLogits(Pointer p) { super(p); } public SoftmaxCrossEntropyWithLogits(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input labels) { super((Pointer)null); allocate(scope, features, labels); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input labels); public native @ByRef Operation operation(); public native SoftmaxCrossEntropyWithLogits operation(Operation operation); public native @ByRef Output loss(); public native SoftmaxCrossEntropyWithLogits loss(Output loss); public native @ByRef Output backprop(); public native SoftmaxCrossEntropyWithLogits backprop(Output backprop); } /** Computes softplus: {@code log(exp(features) + 1)}. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The activations tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Softplus extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Softplus(Pointer p) { super(p); } public Softplus(@Const @ByRef Scope scope, @ByVal Input features) { super((Pointer)null); allocate(scope, features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Softplus operation(Operation operation); public native @ByRef Output activations(); public native Softplus activations(Output activations); } /** Computes softsign: {@code features / (abs(features) + 1)}. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The activations tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Softsign extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Softsign(Pointer p) { super(p); } public Softsign(@Const @ByRef Scope scope, @ByVal Input features) { super((Pointer)null); allocate(scope, features); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Softsign operation(Operation operation); public native @ByRef Output activations(); public native Softsign activations(Output activations); } /** Computes softmax cross entropy cost and gradients to backpropagate. * * Unlike {@code SoftmaxCrossEntropyWithLogits}, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. * * Inputs are the logits, not probabilities. * * Arguments: * * scope: A Scope object * * features: batch_size x num_classes matrix * * labels: batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. * * Returns: * * {@code Output} loss: Per example loss (batch_size vector). * * {@code Output} backprop: backpropagated gradients (batch_size x num_classes matrix). 
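*
* <p>A minimal sketch, assuming float logits and integer class-index labels:
* <pre>{@code
* Scope scope = Scope.NewRootScope();
* Placeholder logits = new Placeholder(scope, DT_FLOAT); // [batch_size, num_classes]
* Placeholder labels = new Placeholder(scope, DT_INT64); // [batch_size], values in [0, num_classes)
* SparseSoftmaxCrossEntropyWithLogits xent =
*     new SparseSoftmaxCrossEntropyWithLogits(scope, logits.asInput(), labels.asInput());
* Output loss = xent.loss(); // per-example loss, shape [batch_size]
* }</pre>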
*/ @Namespace("tensorflow::ops") @NoOffset public static class SparseSoftmaxCrossEntropyWithLogits extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseSoftmaxCrossEntropyWithLogits(Pointer p) { super(p); } public SparseSoftmaxCrossEntropyWithLogits(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input labels) { super((Pointer)null); allocate(scope, features, labels); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input features, @ByVal Input labels); public native @ByRef Operation operation(); public native SparseSoftmaxCrossEntropyWithLogits operation(Operation operation); public native @ByRef Output loss(); public native SparseSoftmaxCrossEntropyWithLogits loss(Output loss); public native @ByRef Output backprop(); public native SparseSoftmaxCrossEntropyWithLogits backprop(Output backprop); } /** Finds values and indices of the {@code k} largest elements for the last dimension. * * If the input is a vector (rank-1), finds the {@code k} largest entries in the vector * and outputs their values and indices as vectors. Thus {@code values[j]} is the * {@code j}-th largest entry in {@code input}, and its index is {@code indices[j]}. * * For matrices (resp. higher rank input), computes the top {@code k} entries in each * row (resp. vector along the last dimension). Thus, * * values.shape = indices.shape = input.shape[:-1] + [k] * * If two elements are equal, the lower-index element appears first. * * Arguments: * * scope: A Scope object * * input: 1-D or higher with last dimension at least {@code k}. * * k: 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). * * Optional attributes (see {@code Attrs}): * * sorted: If true the resulting {@code k} elements will be sorted by the values in * descending order. * * Returns: * * {@code Output} values: The {@code k} largest elements along each last dimensional slice. * * {@code Output} indices: The indices of {@code values} within the last dimension of {@code input}. */ @Namespace("tensorflow::ops") @NoOffset public static class TopK extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TopK(Pointer p) { super(p); } /** Optional attribute setters for TopK */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true the resulting {@code k} elements will be sorted by the values in * descending order. 
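*
* <p>For illustration, a minimal {@code TopK} sketch assuming a float input
* placeholder and a scalar {@code k} placeholder:
* <pre>{@code
* Scope scope = Scope.NewRootScope();
* Placeholder input = new Placeholder(scope, DT_FLOAT);
* Placeholder k     = new Placeholder(scope, DT_INT32);
* TopK topk = new TopK(scope, input.asInput(), k.asInput(), TopK.Sorted(true));
* Output values  = topk.values();  // the k largest values per last-dimension slice
* Output indices = topk.indices(); // their indices within the last dimension
* }</pre>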
* * Defaults to true */ public native @ByVal Attrs Sorted(@Cast("bool") boolean x); public native @Cast("bool") boolean sorted_(); public native Attrs sorted_(boolean sorted_); } public TopK(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input k) { super((Pointer)null); allocate(scope, input, k); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input k); public TopK(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input k, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, k, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input k, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Sorted(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native TopK operation(Operation operation); public native @ByRef Output values(); public native TopK values(Output values); public native @ByRef Output indices(); public native TopK indices(Output indices); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_NN_OPS_H_ // Parsed from tensorflow/cc/ops/no_op.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_NO_OP_H_ // #define TENSORFLOW_CC_OPS_NO_OP_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup no_op No Op * \{

* Does nothing. Only useful as a placeholder for control edges. * * Arguments: * * scope: A Scope object * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class NoOp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public NoOp(Pointer p) { super(p); } public NoOp(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public native @ByRef Operation operation(); public native NoOp operation(Operation operation); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_NO_OP_H_ // Parsed from tensorflow/cc/ops/parsing_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_PARSING_OPS_H_ // #define TENSORFLOW_CC_OPS_PARSING_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup parsing_ops Parsing Ops * \{

* Convert CSV records to tensors. Each column maps to one tensor. * * RFC 4180 format is expected for the CSV records. * (https://tools.ietf.org/html/rfc4180) * Note that leading and trailing spaces are allowed in int and float fields. * * Arguments: * * scope: A Scope object * * records: Each string is a record/row in the CSV, and all records should have * the same format. * * record_defaults: One tensor per column of the input record, with either a * scalar default value for that column or an empty vector if the column is * required. * * Optional attributes (see {@code Attrs}): * * field_delim: char delimiter to separate fields in a record. * * use_quote_delim: If false, treats double quotation marks as regular * characters inside of the string fields (ignoring RFC 4180, Section 2, * Bullet 5). * * na_value: Additional string to recognize as NA/NaN. * * Returns: * * {@code OutputList}: Each tensor will have the same shape as records. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeCSV extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeCSV(Pointer p) { super(p); } /** Optional attribute setters for DecodeCSV */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** char delimiter to separate fields in a record. * * Defaults to "," */ /// public native @ByVal Attrs FieldDelim(@StringPiece BytePointer x); public native @ByVal Attrs FieldDelim(@StringPiece String x); /** If false, treats double quotation marks as regular * characters inside of the string fields (ignoring RFC 4180, Section 2, * Bullet 5). * * Defaults to true */ /// public native @ByVal Attrs UseQuoteDelim(@Cast("bool") boolean x); /** Additional string to recognize as NA/NaN. * * Defaults to "" */ public native @ByVal Attrs NaValue(@StringPiece BytePointer x); public native @ByVal Attrs NaValue(@StringPiece String x); /** Defaults to [] */ public native @ByVal Attrs SelectCols(@ArraySlice IntPointer x); public native @ByVal Attrs SelectCols(@ArraySlice IntBuffer x); public native @ByVal Attrs SelectCols(@ArraySlice int... 
x); public native @StringPiece BytePointer field_delim_(); public native Attrs field_delim_(BytePointer field_delim_); public native @Cast("bool") boolean use_quote_delim_(); public native Attrs use_quote_delim_(boolean use_quote_delim_); public native @StringPiece BytePointer na_value_(); public native Attrs na_value_(BytePointer na_value_); public native @ArraySlice IntPointer select_cols_(); public native Attrs select_cols_(IntPointer select_cols_); } public DecodeCSV(@Const @ByRef Scope scope, @ByVal Input records, @ByVal InputList record_defaults) { super((Pointer)null); allocate(scope, records, record_defaults); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input records, @ByVal InputList record_defaults); public DecodeCSV(@Const @ByRef Scope scope, @ByVal Input records, @ByVal InputList record_defaults, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, records, record_defaults, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input records, @ByVal InputList record_defaults, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator []") Output get(@Cast("size_t") long index); public static native @ByVal Attrs FieldDelim(@StringPiece BytePointer x); public static native @ByVal Attrs FieldDelim(@StringPiece String x); public static native @ByVal Attrs UseQuoteDelim(@Cast("bool") boolean x); public static native @ByVal Attrs NaValue(@StringPiece BytePointer x); public static native @ByVal Attrs NaValue(@StringPiece String x); public static native @ByVal Attrs SelectCols(@ArraySlice IntPointer x); public static native @ByVal Attrs SelectCols(@ArraySlice IntBuffer x); public static native @ByVal Attrs SelectCols(@ArraySlice int... x); public native @ByRef Operation operation(); public native DecodeCSV operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output(); public native DecodeCSV output(OutputVector output); } /** Decompress strings. * * This op decompresses each element of the {@code bytes} input {@code Tensor}, which * is assumed to be compressed using the given {@code compression_type}. * * The {@code output} is a string {@code Tensor} of the same shape as {@code bytes}, * each element containing the decompressed data from the corresponding * element in {@code bytes}. * * Arguments: * * scope: A Scope object * * bytes: A Tensor of string which is compressed. * * Optional attributes (see {@code Attrs}): * * compression_type: A scalar containing either (i) the empty string (no * compression), (ii) "ZLIB", or (iii) "GZIP". * * Returns: * * {@code Output}: A Tensor with the same shape as input {@code bytes}, uncompressed * from bytes. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeCompressed extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeCompressed(Pointer p) { super(p); } /** Optional attribute setters for DecodeCompressed */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
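*
* <p>A sketch of decompressing GZIP-compressed string elements with
* {@code DecodeCompressed}, assuming a string placeholder for the input:
* <pre>{@code
* Scope scope = Scope.NewRootScope();
* Placeholder bytes = new Placeholder(scope, DT_STRING);
* DecodeCompressed decoded = new DecodeCompressed(scope, bytes.asInput(),
*     DecodeCompressed.CompressionType("GZIP"));
* Output plain = decoded.output(); // same shape as bytes, uncompressed
* }</pre>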
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A scalar containing either (i) the empty string (no * compression), (ii) "ZLIB", or (iii) "GZIP". * * Defaults to "" */ public native @ByVal Attrs CompressionType(@StringPiece BytePointer x); public native @ByVal Attrs CompressionType(@StringPiece String x); public native @StringPiece BytePointer compression_type_(); public native Attrs compression_type_(BytePointer compression_type_); } public DecodeCompressed(@Const @ByRef Scope scope, @ByVal Input bytes) { super((Pointer)null); allocate(scope, bytes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input bytes); public DecodeCompressed(@Const @ByRef Scope scope, @ByVal Input bytes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, bytes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input bytes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs CompressionType(@StringPiece BytePointer x); public static native @ByVal Attrs CompressionType(@StringPiece String x); public native @ByRef Operation operation(); public native DecodeCompressed operation(Operation operation); public native @ByRef Output output(); public native DecodeCompressed output(Output output); } /** Convert JSON-encoded Example records to binary protocol buffer strings. * * This op translates a tensor containing Example records, encoded using * the [standard JSON * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), * into a tensor containing the same records encoded as binary protocol * buffers. The resulting tensor can then be fed to any of the other * Example-parsing ops. * * Arguments: * * scope: A Scope object * * json_examples: Each string is a JSON object serialized according to the JSON * mapping of the Example proto. * * Returns: * * {@code Output}: Each string is a binary Example protocol buffer corresponding * to the respective element of {@code json_examples}. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeJSONExample extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeJSONExample(Pointer p) { super(p); } public DecodeJSONExample(@Const @ByRef Scope scope, @ByVal Input json_examples) { super((Pointer)null); allocate(scope, json_examples); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input json_examples); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DecodeJSONExample operation(Operation operation); public native @ByRef Output binary_examples(); public native DecodeJSONExample binary_examples(Output binary_examples); } /** Reinterpret the bytes of a string as a vector of numbers. * * Arguments: * * scope: A Scope object * * bytes: All the elements must have the same length. * * Optional attributes (see {@code Attrs}): * * little_endian: Whether the input {@code bytes} are in little-endian order. 
* Ignored for {@code out_type} values that are stored in a single byte like * {@code uint8}. * * Returns: * * {@code Output}: A Tensor with one more dimension than the input {@code bytes}. The * added dimension will have size equal to the length of the elements * of {@code bytes} divided by the number of bytes to represent {@code out_type}. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeRaw extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeRaw(Pointer p) { super(p); } /** Optional attribute setters for DecodeRaw */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Whether the input {@code bytes} are in little-endian order. * Ignored for {@code out_type} values that are stored in a single byte like * {@code uint8}. * * Defaults to true */ public native @ByVal Attrs LittleEndian(@Cast("bool") boolean x); public native @Cast("bool") boolean little_endian_(); public native Attrs little_endian_(boolean little_endian_); } public DecodeRaw(@Const @ByRef Scope scope, @ByVal Input bytes, @Cast("tensorflow::DataType") int out_type) { super((Pointer)null); allocate(scope, bytes, out_type); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input bytes, @Cast("tensorflow::DataType") int out_type); public DecodeRaw(@Const @ByRef Scope scope, @ByVal Input bytes, @Cast("tensorflow::DataType") int out_type, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, bytes, out_type, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input bytes, @Cast("tensorflow::DataType") int out_type, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs LittleEndian(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native DecodeRaw operation(Operation operation); public native @ByRef Output output(); public native DecodeRaw output(Output output); } /** Transforms a vector of brain.Example protos (as strings) into typed tensors. * * Arguments: * * scope: A Scope object * * serialized: A vector containing a batch of binary serialized Example protos. * * names: A vector containing the names of the serialized protos. * May contain, for example, table key (descriptive) names for the * corresponding serialized protos. These are purely useful for debugging * purposes, and the presence of values here has no effect on the output. * May also be an empty vector if no names are available. * If non-empty, this vector must be the same length as "serialized". * * sparse_keys: A list of Nsparse string Tensors (scalars). * The keys expected in the Examples' features associated with sparse values. * * dense_keys: A list of Ndense string Tensors (scalars). * The keys expected in the Examples' features associated with dense values. 
* * dense_defaults: A list of Ndense Tensors (some may be empty). * dense_defaults[j] provides default values * when the example's feature_map lacks dense_key[j]. If an empty Tensor is * provided for dense_defaults[j], then the Feature dense_keys[j] is required. * The input type is inferred from dense_defaults[j], even when it's empty. * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, * then the shape of dense_defaults[j] must match that of dense_shapes[j]. * If dense_shapes[j] has an undefined major dimension (variable strides dense * feature), dense_defaults[j] must contain a single element: * the padding element. * * sparse_types: A list of Nsparse types; the data types of data in each Feature * given in sparse_keys. * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * dense_shapes: A list of Ndense shapes; the shapes of data in each Feature * given in dense_keys. * The number of elements in the Feature corresponding to dense_key[j] * must always equal dense_shapes[j].NumEntries(). * If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output * Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): * The dense outputs are just the inputs row-stacked by batch. * This works for dense_shapes[j] = (-1, D1, ..., DN). In this case * the shape of the output Tensor dense_values[j] will be * (|serialized|, M, D1, .., DN), where M is the maximum number of blocks * of elements of length D1 * .... * DN, across all minibatch entries * in the input. Any minibatch entry with less than M blocks of elements of * length D1 * ... * DN will be padded with the corresponding default_value * scalar element along the second dimension. * * Returns: * * {@code OutputList} sparse_indices * * {@code OutputList} sparse_values * * {@code OutputList} sparse_shapes * * {@code OutputList} dense_values */ @Namespace("tensorflow::ops") @NoOffset public static class ParseExample extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ParseExample(Pointer p) { super(p); } public ParseExample(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input names, @ByVal InputList sparse_keys, @ByVal InputList dense_keys, @ByVal InputList dense_defaults, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector sparse_types, @ArraySlice PartialTensorShape dense_shapes) { super((Pointer)null); allocate(scope, serialized, names, sparse_keys, dense_keys, dense_defaults, sparse_types, dense_shapes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input names, @ByVal InputList sparse_keys, @ByVal InputList dense_keys, @ByVal InputList dense_defaults, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector sparse_types, @ArraySlice PartialTensorShape dense_shapes); public native @ByRef Operation operation(); public native ParseExample operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector sparse_indices(); public native ParseExample sparse_indices(OutputVector sparse_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector sparse_values(); public native ParseExample sparse_values(OutputVector sparse_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector sparse_shapes(); public native ParseExample sparse_shapes(OutputVector sparse_shapes); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector dense_values(); public native ParseExample dense_values(OutputVector dense_values); } /** Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors. * * Arguments: * * scope: A Scope object * * serialized: A vector containing binary serialized SequenceExample protos. * * debug_name: A vector containing the names of the serialized protos. * May contain, for example, table key (descriptive) name for the * corresponding serialized proto. This is purely useful for debugging * purposes, and the presence of values here has no effect on the output. * May also be an empty vector if no name is available. * * context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). * context_dense_defaults[j] provides default values * when the SequenceExample's context map lacks context_dense_key[j]. * If an empty Tensor is provided for context_dense_defaults[j], * then the Feature context_dense_keys[j] is required. * The input type is inferred from context_dense_defaults[j], even when it's * empty. If context_dense_defaults[j] is not empty, its shape must match * context_dense_shapes[j]. * * feature_list_dense_missing_assumed_empty: A vector listing the * FeatureList keys which may be missing from the SequenceExamples. If the * associated FeatureList is missing, it is treated as empty. By default, * any FeatureList not listed in this vector must exist in the SequenceExamples. * * context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). * The keys expected in the Examples' features associated with context_sparse * values. * * context_dense_keys: A list of Ncontext_dense string Tensors (scalars). * The keys expected in the SequenceExamples' context features associated with * dense values. * * feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors * (scalars). The keys expected in the FeatureLists associated with sparse * values. * * feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). * The keys expected in the SequenceExamples' feature_lists associated * with lists of dense values. 
* * Optional attributes (see {@code Attrs}): * * context_sparse_types: A list of Ncontext_sparse types; the data types of data in * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. * The number of elements in the Feature corresponding to context_dense_key[j] * must always equal context_dense_shapes[j].NumEntries(). * The shape of context_dense_values[j] will match context_dense_shapes[j]. * * feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to * feature_list_dense_key[j] must always equal * feature_list_dense_shapes[j].NumEntries(). * * Returns: * * {@code OutputList} context_sparse_indices * * {@code OutputList} context_sparse_values * * {@code OutputList} context_sparse_shapes * * {@code OutputList} context_dense_values * * {@code OutputList} feature_list_sparse_indices * * {@code OutputList} feature_list_sparse_values * * {@code OutputList} feature_list_sparse_shapes * * {@code OutputList} feature_list_dense_values * * {@code OutputList} feature_list_dense_lengths */ @Namespace("tensorflow::ops") @NoOffset public static class ParseSequenceExample extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParseSequenceExample(Pointer p) { super(p); } /** Optional attribute setters for ParseSequenceExample */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Defaults to 0 */ public native @ByVal Attrs NcontextSparse(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs NcontextDense(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ public native @ByVal Attrs NfeatureListSparse(@Cast("tensorflow::int64") long x); /** Defaults to 0 */ /// public native @ByVal Attrs NfeatureListDense(@Cast("tensorflow::int64") long x); /** A list of Ncontext_sparse types; the data types of data in * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). 
* * Defaults to [] */ public native @ByVal Attrs ContextSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** Defaults to [] */ /// public native @ByVal Attrs FeatureListDenseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. * The number of elements in the Feature corresponding to context_dense_key[j] * must always equal context_dense_shapes[j].NumEntries(). * The shape of context_dense_values[j] will match context_dense_shapes[j]. * * Defaults to [] */ /// public native @ByVal Attrs ContextDenseShapes(@ArraySlice PartialTensorShape x); /** A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * Defaults to [] */ /// public native @ByVal Attrs FeatureListSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to * feature_list_dense_key[j] must always equal * feature_list_dense_shapes[j].NumEntries(). * * Defaults to [] */ public native @ByVal Attrs FeatureListDenseShapes(@ArraySlice PartialTensorShape x); public native @Cast("tensorflow::int64") long Ncontext_sparse_(); public native Attrs Ncontext_sparse_(long Ncontext_sparse_); public native @Cast("tensorflow::int64") long Ncontext_dense_(); public native Attrs Ncontext_dense_(long Ncontext_dense_); public native @Cast("tensorflow::int64") long Nfeature_list_sparse_(); public native Attrs Nfeature_list_sparse_(long Nfeature_list_sparse_); public native @Cast("tensorflow::int64") long Nfeature_list_dense_(); public native Attrs Nfeature_list_dense_(long Nfeature_list_dense_); public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector context_sparse_types_(); public native Attrs context_sparse_types_(DataTypeVector context_sparse_types_); public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector feature_list_dense_types_(); public native Attrs feature_list_dense_types_(DataTypeVector feature_list_dense_types_); public native @ArraySlice PartialTensorShape context_dense_shapes_(); public native Attrs context_dense_shapes_(PartialTensorShape context_dense_shapes_); public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector feature_list_sparse_types_(); public native Attrs feature_list_sparse_types_(DataTypeVector feature_list_sparse_types_); public native @ArraySlice PartialTensorShape feature_list_dense_shapes_(); public native Attrs feature_list_dense_shapes_(PartialTensorShape feature_list_dense_shapes_); } public ParseSequenceExample(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input debug_name, @ByVal InputList context_dense_defaults, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_missing_assumed_empty, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_dense_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_keys) { super((Pointer)null); allocate(scope, serialized, 
debug_name, context_dense_defaults, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input debug_name, @ByVal InputList context_dense_defaults, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_missing_assumed_empty, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_dense_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_keys); public ParseSequenceExample(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input debug_name, @ByVal InputList context_dense_defaults, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_missing_assumed_empty, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_dense_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_keys, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, serialized, debug_name, context_dense_defaults, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input debug_name, @ByVal InputList context_dense_defaults, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_missing_assumed_empty, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector context_dense_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector feature_list_dense_keys, @Const @ByRef Attrs attrs); public static native @ByVal Attrs NcontextSparse(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs NcontextDense(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs NfeatureListSparse(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs NfeatureListDense(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs ContextSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); public static native @ByVal Attrs FeatureListDenseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); public static native @ByVal Attrs ContextDenseShapes(@ArraySlice PartialTensorShape x); public static native @ByVal Attrs FeatureListSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); public static native @ByVal Attrs FeatureListDenseShapes(@ArraySlice PartialTensorShape x); public native @ByRef Operation operation(); public native ParseSequenceExample operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_sparse_indices(); public native ParseSequenceExample context_sparse_indices(OutputVector context_sparse_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_sparse_values(); 
public native ParseSequenceExample context_sparse_values(OutputVector context_sparse_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_sparse_shapes(); public native ParseSequenceExample context_sparse_shapes(OutputVector context_sparse_shapes); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_dense_values(); public native ParseSequenceExample context_dense_values(OutputVector context_dense_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_sparse_indices(); public native ParseSequenceExample feature_list_sparse_indices(OutputVector feature_list_sparse_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_sparse_values(); public native ParseSequenceExample feature_list_sparse_values(OutputVector feature_list_sparse_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_sparse_shapes(); public native ParseSequenceExample feature_list_sparse_shapes(OutputVector feature_list_sparse_shapes); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_dense_values(); public native ParseSequenceExample feature_list_dense_values(OutputVector feature_list_dense_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_dense_lengths(); public native ParseSequenceExample feature_list_dense_lengths(OutputVector feature_list_dense_lengths); } /** Transforms a tf.Example proto (as a string) into typed tensors. * * Arguments: * * scope: A Scope object * * serialized: A scalar string containing a binary serialized Example proto. * * dense_defaults: A list of Tensors (some may be empty), whose length matches * the length of {@code dense_keys}. dense_defaults[j] provides default values * when the example's feature_map lacks dense_key[j]. If an empty Tensor is * provided for dense_defaults[j], then the Feature dense_keys[j] is required. * The input type is inferred from dense_defaults[j], even when it's empty. * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, * then the shape of dense_defaults[j] must match that of dense_shapes[j]. * If dense_shapes[j] has an undefined major dimension (variable strides dense * feature), dense_defaults[j] must contain a single element: * the padding element. * * num_sparse: The number of sparse features to be parsed from the example. This * must match the lengths of {@code sparse_keys} and {@code sparse_types}. * * sparse_keys: A list of {@code num_sparse} strings. * The keys expected in the Examples' features associated with sparse values. * * dense_keys: The keys expected in the Examples' features associated with dense * values. * * sparse_types: A list of {@code num_sparse} types; the data types of data in each * Feature given in sparse_keys. * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * dense_shapes: The shapes of data in each Feature given in dense_keys. * The length of this list must match the length of {@code dense_keys}. The * number of elements in the Feature corresponding to dense_key[j] must * always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] * will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, * ..., DN), the shape of the output Tensor dense_values[j] will be (M, * D1, ..., DN), where M is the number of blocks of elements of length * D1 * ....
* DN, in the input. * * Returns: * * {@code OutputList} sparse_indices * * {@code OutputList} sparse_values * * {@code OutputList} sparse_shapes * * {@code OutputList} dense_values */ @Namespace("tensorflow::ops") @NoOffset public static class ParseSingleExample extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParseSingleExample(Pointer p) { super(p); } public ParseSingleExample(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal InputList dense_defaults, @Cast("tensorflow::int64") long num_sparse, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector dense_keys, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector sparse_types, @ArraySlice PartialTensorShape dense_shapes) { super((Pointer)null); allocate(scope, serialized, dense_defaults, num_sparse, sparse_keys, dense_keys, sparse_types, dense_shapes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal InputList dense_defaults, @Cast("tensorflow::int64") long num_sparse, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector sparse_keys, @Cast("const tensorflow::gtl::ArraySlice*") @ByRef StringVector dense_keys, @Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector sparse_types, @ArraySlice PartialTensorShape dense_shapes); public native @ByRef Operation operation(); public native ParseSingleExample operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector sparse_indices(); public native ParseSingleExample sparse_indices(OutputVector sparse_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector sparse_values(); public native ParseSingleExample sparse_values(OutputVector sparse_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector sparse_shapes(); public native ParseSingleExample sparse_shapes(OutputVector sparse_shapes); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector dense_values(); public native ParseSingleExample dense_values(OutputVector dense_values); } /** Transforms a scalar brain.SequenceExample proto (as a string) into typed tensors. * * Arguments: * * scope: A Scope object * * serialized: A scalar containing a binary serialized SequenceExample proto. * * feature_list_dense_missing_assumed_empty: A vector listing the * FeatureList keys which may be missing from the SequenceExample. If the * associated FeatureList is missing, it is treated as empty. By default, * any FeatureList not listed in this vector must exist in the SequenceExample. * * context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). * The keys expected in the Examples' features associated with context_sparse * values. * * context_dense_keys: A list of Ncontext_dense string Tensors (scalars). * The keys expected in the SequenceExamples' context features associated with * dense values. * * feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors * (scalars). The keys expected in the FeatureLists associated with sparse * values. * * feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). * The keys expected in the SequenceExamples' feature_lists associated * with lists of dense values. * * context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). 
* context_dense_defaults[j] provides default values * when the SequenceExample's context map lacks context_dense_key[j]. * If an empty Tensor is provided for context_dense_defaults[j], * then the Feature context_dense_keys[j] is required. * The input type is inferred from context_dense_defaults[j], even when it's * empty. If context_dense_defaults[j] is not empty, its shape must match * context_dense_shapes[j]. * * debug_name: A scalar containing the name of the serialized proto. * May contain, for example, table key (descriptive) name for the * corresponding serialized proto. This is purely useful for debugging * purposes, and the presence of values here has no effect on the output. * May also be an empty scalar if no name is available. * * Optional attributes (see {@code Attrs}): * * context_sparse_types: A list of Ncontext_sparse types; the data types of data in * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. * The number of elements in the Feature corresponding to context_dense_key[j] * must always equal context_dense_shapes[j].NumEntries(). * The shape of context_dense_values[j] will match context_dense_shapes[j]. * * feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to * feature_list_dense_key[j] must always equal * feature_list_dense_shapes[j].NumEntries(). * * Returns: * * {@code OutputList} context_sparse_indices * * {@code OutputList} context_sparse_values * * {@code OutputList} context_sparse_shapes * * {@code OutputList} context_dense_values * * {@code OutputList} feature_list_sparse_indices * * {@code OutputList} feature_list_sparse_values * * {@code OutputList} feature_list_sparse_shapes * * {@code OutputList} feature_list_dense_values */ @Namespace("tensorflow::ops") @NoOffset public static class ParseSingleSequenceExample extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParseSingleSequenceExample(Pointer p) { super(p); } /** Optional attribute setters for ParseSingleSequenceExample */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A list of Ncontext_sparse types; the data types of data in * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). 
* * Defaults to [] */ public native @ByVal Attrs ContextSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** Defaults to [] */ /// public native @ByVal Attrs FeatureListDenseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. * The number of elements in the Feature corresponding to context_dense_key[j] * must always equal context_dense_shapes[j].NumEntries(). * The shape of context_dense_values[j] will match context_dense_shapes[j]. * * Defaults to [] */ /// public native @ByVal Attrs ContextDenseShapes(@ArraySlice PartialTensorShape x); /** A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * * Defaults to [] */ /// public native @ByVal Attrs FeatureListSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); /** A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to * feature_list_dense_key[j] must always equal * feature_list_dense_shapes[j].NumEntries(). * * Defaults to [] */ public native @ByVal Attrs FeatureListDenseShapes(@ArraySlice PartialTensorShape x); public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector context_sparse_types_(); public native Attrs context_sparse_types_(DataTypeVector context_sparse_types_); public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector feature_list_dense_types_(); public native Attrs feature_list_dense_types_(DataTypeVector feature_list_dense_types_); public native @ArraySlice PartialTensorShape context_dense_shapes_(); public native Attrs context_dense_shapes_(PartialTensorShape context_dense_shapes_); public native @ByRef @Cast("tensorflow::DataTypeSlice*") DataTypeVector feature_list_sparse_types_(); public native Attrs feature_list_sparse_types_(DataTypeVector feature_list_sparse_types_); public native @ArraySlice PartialTensorShape feature_list_dense_shapes_(); public native Attrs feature_list_dense_shapes_(PartialTensorShape feature_list_dense_shapes_); } public ParseSingleSequenceExample(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input feature_list_dense_missing_assumed_empty, @ByVal InputList context_sparse_keys, @ByVal InputList context_dense_keys, @ByVal InputList feature_list_sparse_keys, @ByVal InputList feature_list_dense_keys, @ByVal InputList context_dense_defaults, @ByVal Input debug_name) { super((Pointer)null); allocate(scope, serialized, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, context_dense_defaults, debug_name); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input feature_list_dense_missing_assumed_empty, @ByVal InputList context_sparse_keys, @ByVal InputList context_dense_keys, @ByVal InputList feature_list_sparse_keys, @ByVal InputList feature_list_dense_keys, @ByVal InputList context_dense_defaults, @ByVal Input debug_name); public ParseSingleSequenceExample(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input feature_list_dense_missing_assumed_empty, @ByVal InputList context_sparse_keys, @ByVal InputList context_dense_keys, @ByVal InputList 
feature_list_sparse_keys, @ByVal InputList feature_list_dense_keys, @ByVal InputList context_dense_defaults, @ByVal Input debug_name, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, serialized, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, context_dense_defaults, debug_name, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @ByVal Input feature_list_dense_missing_assumed_empty, @ByVal InputList context_sparse_keys, @ByVal InputList context_dense_keys, @ByVal InputList feature_list_sparse_keys, @ByVal InputList feature_list_dense_keys, @ByVal InputList context_dense_defaults, @ByVal Input debug_name, @Const @ByRef Attrs attrs); public static native @ByVal Attrs ContextSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); public static native @ByVal Attrs FeatureListDenseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); public static native @ByVal Attrs ContextDenseShapes(@ArraySlice PartialTensorShape x); public static native @ByVal Attrs FeatureListSparseTypes(@Cast("const tensorflow::DataTypeSlice*") @ByRef DataTypeVector x); public static native @ByVal Attrs FeatureListDenseShapes(@ArraySlice PartialTensorShape x); public native @ByRef Operation operation(); public native ParseSingleSequenceExample operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_sparse_indices(); public native ParseSingleSequenceExample context_sparse_indices(OutputVector context_sparse_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_sparse_values(); public native ParseSingleSequenceExample context_sparse_values(OutputVector context_sparse_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_sparse_shapes(); public native ParseSingleSequenceExample context_sparse_shapes(OutputVector context_sparse_shapes); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector context_dense_values(); public native ParseSingleSequenceExample context_dense_values(OutputVector context_dense_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_sparse_indices(); public native ParseSingleSequenceExample feature_list_sparse_indices(OutputVector feature_list_sparse_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_sparse_values(); public native ParseSingleSequenceExample feature_list_sparse_values(OutputVector feature_list_sparse_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_sparse_shapes(); public native ParseSingleSequenceExample feature_list_sparse_shapes(OutputVector feature_list_sparse_shapes); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector feature_list_dense_values(); public native ParseSingleSequenceExample feature_list_dense_values(OutputVector feature_list_dense_values); } /** Transforms a serialized tensorflow.TensorProto proto into a Tensor. * * Arguments: * * scope: A Scope object * * serialized: A scalar string containing a serialized TensorProto proto. * * out_type: The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. * * Returns: * * {@code Output}: A Tensor of type {@code out_type}. 
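 *
 * A minimal usage sketch (illustrative only, not part of the generated bindings;
 * assumes an existing {@code Scope root} and a scalar string
 * {@code Input serializedProto}, and that the serialized tensor holds floats):
 *  {@code
 *  ParseTensor parsed = new ParseTensor(root, serializedProto, DT_FLOAT);
 *  Output tensor = parsed.output();   // usable as an Input to downstream ops
 *  }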
*/ @Namespace("tensorflow::ops") @NoOffset public static class ParseTensor extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParseTensor(Pointer p) { super(p); } public ParseTensor(@Const @ByRef Scope scope, @ByVal Input serialized, @Cast("tensorflow::DataType") int out_type) { super((Pointer)null); allocate(scope, serialized, out_type); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized, @Cast("tensorflow::DataType") int out_type); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ParseTensor operation(Operation operation); public native @ByRef Output output(); public native ParseTensor output(Output output); } /** Transforms a Tensor into a serialized TensorProto proto. * * Arguments: * * scope: A Scope object * * tensor: A Tensor of type {@code T}. * * Returns: * * {@code Output}: A serialized TensorProto proto of the input tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SerializeTensor extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SerializeTensor(Pointer p) { super(p); } public SerializeTensor(@Const @ByRef Scope scope, @ByVal Input tensor) { super((Pointer)null); allocate(scope, tensor); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input tensor); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SerializeTensor operation(Operation operation); public native @ByRef Output serialized(); public native SerializeTensor serialized(Output serialized); } /** Converts each string in the input Tensor to the specified numeric type. * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * out_type: The numeric type to interpret each string in {@code string_tensor} as. * * Returns: * * {@code Output}: A Tensor of the same shape as the input {@code string_tensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class StringToNumber extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringToNumber(Pointer p) { super(p); } /** Optional attribute setters for StringToNumber */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The numeric type to interpret each string in {@code string_tensor} as. 
* * Defaults to DT_FLOAT */ public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); } public StringToNumber(@Const @ByRef Scope scope, @ByVal Input string_tensor) { super((Pointer)null); allocate(scope, string_tensor); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input string_tensor); public StringToNumber(@Const @ByRef Scope scope, @ByVal Input string_tensor, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, string_tensor, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input string_tensor, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native StringToNumber operation(Operation operation); public native @ByRef Output output(); public native StringToNumber output(Output output); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_PARSING_OPS_H_ // Parsed from tensorflow/cc/ops/random_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_RANDOM_OPS_H_ // #define TENSORFLOW_CC_OPS_RANDOM_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup random_ops Random Ops * \{

* Draws samples from a multinomial distribution. * * Arguments: * * scope: A Scope object * * logits: 2-D Tensor with shape {@code [batch_size, num_classes]}. Each slice {@code [i, :]} * represents the unnormalized log probabilities for all classes. * * num_samples: 0-D. Number of independent samples to draw for each row slice. * * Optional attributes (see {@code Attrs}): * * seed: If either seed or seed2 is set to be non-zero, the internal random number * generator is seeded by the given seed. Otherwise, a random seed is used. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: 2-D Tensor with shape {@code [batch_size, num_samples]}. Each slice {@code [i, :]} * contains the drawn class labels with range {@code [0, num_classes)}. */ @Namespace("tensorflow::ops") @NoOffset public static class Multinomial extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Multinomial(Pointer p) { super(p); } /** Optional attribute setters for Multinomial */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either seed or seed2 is set to be non-zero, the internal random number * generator is seeded by the given seed. Otherwise, a random seed is used. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. 
* * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); /** Defaults to DT_INT64 */ public native @ByVal Attrs OutputDtype(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); public native @Cast("tensorflow::DataType") int output_dtype_(); public native Attrs output_dtype_(int output_dtype_); } public Multinomial(@Const @ByRef Scope scope, @ByVal Input logits, @ByVal Input num_samples) { super((Pointer)null); allocate(scope, logits, num_samples); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input logits, @ByVal Input num_samples); public Multinomial(@Const @ByRef Scope scope, @ByVal Input logits, @ByVal Input num_samples, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, logits, num_samples, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input logits, @ByVal Input num_samples, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs OutputDtype(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native Multinomial operation(Operation operation); public native @ByRef Output output(); public native Multinomial output(Output output); } /** Outputs random values from a normal distribution. * * The parameters may each be a scalar which applies to the entire output, or a * vector of length shape[0] which stores the parameters for each batch. * * Arguments: * * scope: A Scope object * * shape: The shape of the output tensor. Batches are indexed by the 0th dimension. * * means: The mean parameter of each batch. * * stdevs: The standard deviation parameter of each batch. Must be greater than 0. * * minvals: The minimum cutoff. May be -infinity. * * maxvals: The maximum cutoff. May be +infinity, and must be more than the minval * for each batch. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A matrix of shape num_batches x samples_per_batch, filled with random * truncated normal values using the parameters for each row. */ @Namespace("tensorflow::ops") @NoOffset public static class ParameterizedTruncatedNormal extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParameterizedTruncatedNormal(Pointer p) { super(p); } /** Optional attribute setters for ParameterizedTruncatedNormal */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public ParameterizedTruncatedNormal(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input means, @ByVal Input stdevs, @ByVal Input minvals, @ByVal Input maxvals) { super((Pointer)null); allocate(scope, shape, means, stdevs, minvals, maxvals); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input means, @ByVal Input stdevs, @ByVal Input minvals, @ByVal Input maxvals); public ParameterizedTruncatedNormal(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input means, @ByVal Input stdevs, @ByVal Input minvals, @ByVal Input maxvals, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, means, stdevs, minvals, maxvals, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input means, @ByVal Input stdevs, @ByVal Input minvals, @ByVal Input maxvals, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native ParameterizedTruncatedNormal operation(Operation operation); public native @ByRef Output output(); public native ParameterizedTruncatedNormal output(Output output); } /** Outputs random values from the Gamma distribution(s) described by alpha. * * This op uses the algorithm by Marsaglia et al. to acquire samples via * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 * * Arguments: * * scope: A Scope object * * shape: 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. * * alpha: A tensor in which each scalar is a "shape" parameter describing the * associated gamma distribution. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor with shape {@code shape + shape(alpha)}. Each slice * {@code [:, ..., :, i0, i1, ...iN]} contains the samples drawn for * {@code alpha[i0, i1, ...iN]}. The dtype of the output matches the dtype of alpha. */ @Namespace("tensorflow::ops") @NoOffset public static class RandomGamma extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public RandomGamma(Pointer p) { super(p); } /** Optional attribute setters for RandomGamma */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public RandomGamma(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input alpha) { super((Pointer)null); allocate(scope, shape, alpha); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input alpha); public RandomGamma(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input alpha, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, alpha, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input alpha, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native RandomGamma operation(Operation operation); public native @ByRef Output output(); public native RandomGamma output(Output output); } /** Outputs random values from the Poisson distribution(s) described by rate. * * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * * Arguments: * * scope: A Scope object * * shape: 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * * rate: A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor with shape {@code shape + shape(rate)}. 
Each slice * {@code [:, ..., :, i0, i1, ...iN]} contains the samples drawn for * {@code rate[i0, i1, ...iN]}. */ @Namespace("tensorflow::ops") @NoOffset public static class RandomPoissonV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RandomPoissonV2(Pointer p) { super(p); } /** Optional attribute setters for RandomPoissonV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); /** Defaults to DT_INT64 */ public native @ByVal Attrs Dtype(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); public native @Cast("tensorflow::DataType") int dtype_(); public native Attrs dtype_(int dtype_); } public RandomPoissonV2(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input rate) { super((Pointer)null); allocate(scope, shape, rate); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input rate); public RandomPoissonV2(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input rate, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, rate, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input rate, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Dtype(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native RandomPoissonV2 operation(Operation operation); public native @ByRef Output output(); public native RandomPoissonV2 output(Output output); } /** Randomly shuffles a tensor along its first dimension. * * The tensor is shuffled along dimension 0, such that each {@code value[j]} is mapped * to one and only one {@code output[i]}. For example, a mapping that might occur for a * 3x2 tensor is: * *

 *  {@code
 *  [[1, 2],       [[5, 6],
 *   [3, 4],  ==>   [1, 2],
 *   [5, 6]]        [3, 4]]
 *  }
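 *
 * A construction sketch (illustrative only; assumes an existing {@code Scope root}
 * and an {@code Input value} holding the tensor to shuffle; fixing both optional
 * seeds makes the permutation reproducible):
 *  {@code
 *  RandomShuffle shuffled = new RandomShuffle(root, value,
 *      RandomShuffle.Seed(42).Seed2(7));
 *  Output out = shuffled.output();
 *  }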
* * Arguments: * * scope: A Scope object * * value: The tensor to be shuffled. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor of same shape and type as {@code value}, shuffled along its first * dimension. */ @Namespace("tensorflow::ops") @NoOffset public static class RandomShuffle extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RandomShuffle(Pointer p) { super(p); } /** Optional attribute setters for RandomShuffle */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public RandomShuffle(@Const @ByRef Scope scope, @ByVal Input value) { super((Pointer)null); allocate(scope, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value); public RandomShuffle(@Const @ByRef Scope scope, @ByVal Input value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native RandomShuffle operation(Operation operation); public native @ByRef Output output(); public native RandomShuffle output(Output output); } /** Outputs random values from a normal distribution. * * The generated values will have mean 0 and standard deviation 1. * * Arguments: * * scope: A Scope object * * shape: The shape of the output tensor. * * dtype: The type of the output. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor of the specified shape filled with random normal values. 
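 *
 * A minimal sketch (illustrative only; assumes an existing {@code Scope root} and a
 * 1-D integer {@code Input shape} such as {@code {2, 3}}):
 *  {@code
 *  RandomNormal rn = new RandomNormal(root, shape, DT_FLOAT);
 *  Output samples = rn.output();   // standard-normal values in the given shape
 *  }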
*/ @Namespace("tensorflow::ops") @NoOffset public static class RandomNormal extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RandomNormal(Pointer p) { super(p); } /** Optional attribute setters for RandomNormal */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public RandomNormal(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, shape, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype); public RandomNormal(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native RandomNormal operation(Operation operation); public native @ByRef Output output(); public native RandomNormal output(Output output); } /** Outputs random values from a uniform distribution. * * The generated values follow a uniform distribution in the range {@code [0, 1)}. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * * Arguments: * * scope: A Scope object * * shape: The shape of the output tensor. * * dtype: The type of the output. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor of the specified shape filled with uniform random values. */ @Namespace("tensorflow::ops") @NoOffset public static class RandomUniform extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public RandomUniform(Pointer p) { super(p); } /** Optional attribute setters for RandomUniform */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public RandomUniform(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, shape, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype); public RandomUniform(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native RandomUniform operation(Operation operation); public native @ByRef Output output(); public native RandomUniform output(Output output); } /** Outputs random integers from a uniform distribution. * * The generated values are uniform integers in the range {@code [minval, maxval)}. * The lower bound {@code minval} is included in the range, while the upper bound * {@code maxval} is excluded. * * The random integers are slightly biased unless {@code maxval - minval} is an exact * power of two. The bias is small for values of {@code maxval - minval} significantly * smaller than the range of the output (either {@code 2^32} or {@code 2^64}). * * Arguments: * * scope: A Scope object * * shape: The shape of the output tensor. * * minval: 0-D. Inclusive lower bound on the generated integers. * * maxval: 0-D. Exclusive upper bound on the generated integers. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor of the specified shape filled with uniform random integers. 
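 *
 * A minimal sketch (illustrative only; assumes an existing {@code Scope root} and
 * {@code Input} tensors {@code shape}, {@code minval}, and {@code maxval}):
 *  {@code
 *  RandomUniformInt ints = new RandomUniformInt(root, shape, minval, maxval);
 *  Output out = ints.output();   // uniform integers in [minval, maxval)
 *  }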
*/ @Namespace("tensorflow::ops") @NoOffset public static class RandomUniformInt extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RandomUniformInt(Pointer p) { super(p); } /** Optional attribute setters for RandomUniformInt */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public RandomUniformInt(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input minval, @ByVal Input maxval) { super((Pointer)null); allocate(scope, shape, minval, maxval); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input minval, @ByVal Input maxval); public RandomUniformInt(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input minval, @ByVal Input maxval, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, minval, maxval, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @ByVal Input minval, @ByVal Input maxval, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native RandomUniformInt operation(Operation operation); public native @ByRef Output output(); public native RandomUniformInt output(Output output); } /** Outputs random values from a truncated normal distribution. * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * * Arguments: * * scope: A Scope object * * shape: The shape of the output tensor. * * dtype: The type of the output. * * Optional attributes (see {@code Attrs}): * * seed: If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * seed2: A second seed to avoid seed collision. * * Returns: * * {@code Output}: A tensor of the specified shape filled with random truncated normal * values. 
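 *
 * A minimal sketch (illustrative only; assumes an existing {@code Scope root} and a
 * 1-D integer {@code Input shape}; the optional seeds pin down the sample stream):
 *  {@code
 *  TruncatedNormal tn = new TruncatedNormal(root, shape, DT_FLOAT,
 *      TruncatedNormal.Seed(1).Seed2(2));
 *  Output samples = tn.output();
 *  }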
*/ @Namespace("tensorflow::ops") @NoOffset public static class TruncatedNormal extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TruncatedNormal(Pointer p) { super(p); } /** Optional attribute setters for TruncatedNormal */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If either {@code seed} or {@code seed2} are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * * Defaults to 0 */ /// public native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); /** A second seed to avoid seed collision. * * Defaults to 0 */ public native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long seed_(); public native Attrs seed_(long seed_); public native @Cast("tensorflow::int64") long seed2_(); public native Attrs seed2_(long seed2_); } public TruncatedNormal(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, shape, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype); public TruncatedNormal(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Seed(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Seed2(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native TruncatedNormal operation(Operation operation); public native @ByRef Output output(); public native TruncatedNormal output(Output output); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_RANDOM_OPS_H_ // Parsed from tensorflow/cc/ops/sparse_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_SPARSE_OPS_H_ // #define TENSORFLOW_CC_OPS_SPARSE_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup sparse_ops Sparse Ops * \{
 *
* Add an {@code N}-minibatch {@code SparseTensor} to a {@code SparseTensorsMap}, return {@code N} handles. * * A {@code SparseTensor} of rank {@code R} is represented by three tensors: {@code sparse_indices}, * {@code sparse_values}, and {@code sparse_shape}, where * *
 *
 *  {@code sparse_indices.shape[1] == sparse_shape.shape[0] == R}
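 *
 *  For instance, a rank-2 minibatch {@code SparseTensor} has {@code sparse_indices}
 *  of shape {@code [nnz, 2]} and a {@code sparse_shape} of shape {@code [2]}.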
* * An {@code N}-minibatch of {@code SparseTensor} objects is represented as a {@code SparseTensor} * having a first {@code sparse_indices} column taking values between {@code [0, N)}, where * the minibatch size {@code N == sparse_shape[0]}. * * The input {@code SparseTensor} must have rank {@code R} greater than 1, and the first * dimension is treated as the minibatch dimension. Elements of the {@code SparseTensor} * must be sorted in increasing order of this first dimension. The stored * {@code SparseTensor} objects pointed to by each row of the output {@code sparse_handles} * will have rank {@code R-1}. * * The {@code SparseTensor} values can then be read out as part of a minibatch by passing * the given keys as vector elements to {@code TakeManySparseFromTensorsMap}. To ensure * the correct {@code SparseTensorsMap} is accessed, ensure that the same * {@code container} and {@code shared_name} are passed to that Op. If no {@code shared_name} * is provided here, instead use the *name* of the Operation created by calling * {@code AddManySparseToTensorsMap} as the {@code shared_name} passed to * {@code TakeManySparseFromTensorsMap}. Ensure the Operations are colocated. * * Arguments: * * scope: A Scope object * * sparse_indices: 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * {@code sparse_indices[:, 0]} must be ordered values in {@code [0, N)}. * * sparse_values: 1-D. The {@code values} of the minibatch {@code SparseTensor}. * * sparse_shape: 1-D. The {@code shape} of the minibatch {@code SparseTensor}. * The minibatch size {@code N == sparse_shape[0]}. * * Optional attributes (see {@code Attrs}): * * container: The container name for the {@code SparseTensorsMap} created by this op. * * shared_name: The shared name for the {@code SparseTensorsMap} created by this op. * If blank, the new Operation's unique name is used. * * Returns: * * {@code Output}: 1-D. The handles of the {@code SparseTensor} now stored in the * {@code SparseTensorsMap}. Shape: {@code [N]}. */ @Namespace("tensorflow::ops") @NoOffset public static class AddManySparseToTensorsMap extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AddManySparseToTensorsMap(Pointer p) { super(p); } /** Optional attribute setters for AddManySparseToTensorsMap */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The container name for the {@code SparseTensorsMap} created by this op. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** The shared name for the {@code SparseTensorsMap} created by this op. * If blank, the new Operation's unique name is used. 
* * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public AddManySparseToTensorsMap(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape); public AddManySparseToTensorsMap(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native AddManySparseToTensorsMap operation(Operation operation); public native @ByRef Output sparse_handles(); public native AddManySparseToTensorsMap sparse_handles(Output sparse_handles); } /** Add a {@code SparseTensor} to a {@code SparseTensorsMap} return its handle. * * A {@code SparseTensor} is represented by three tensors: {@code sparse_indices}, * {@code sparse_values}, and {@code sparse_shape}. * * This operator takes the given {@code SparseTensor} and adds it to a container * object (a {@code SparseTensorsMap}). A unique key within this container is generated * in the form of an {@code int64}, and this is the value that is returned. * * The {@code SparseTensor} can then be read out as part of a minibatch by passing * the key as a vector element to {@code TakeManySparseFromTensorsMap}. To ensure * the correct {@code SparseTensorsMap} is accessed, ensure that the same * {@code container} and {@code shared_name} are passed to that Op. If no {@code shared_name} * is provided here, instead use the *name* of the Operation created by calling * {@code AddSparseToTensorsMap} as the {@code shared_name} passed to * {@code TakeManySparseFromTensorsMap}. Ensure the Operations are colocated. * * Arguments: * * scope: A Scope object * * sparse_indices: 2-D. The {@code indices} of the {@code SparseTensor}. * * sparse_values: 1-D. The {@code values} of the {@code SparseTensor}. * * sparse_shape: 1-D. The {@code shape} of the {@code SparseTensor}. * * Optional attributes (see {@code Attrs}): * * container: The container name for the {@code SparseTensorsMap} created by this op. * * shared_name: The shared name for the {@code SparseTensorsMap} created by this op. * If blank, the new Operation's unique name is used. * * Returns: * * {@code Output}: 0-D. The handle of the {@code SparseTensor} now stored in the * {@code SparseTensorsMap}. 
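 *
 *  A minimal usage sketch. Illustrative only: the {@code Placeholder} inputs and the
 *  map name {@code "my_sparse_map"} are hypothetical assumptions; pass the same
 *  {@code SharedName} to {@code TakeManySparseFromTensorsMap} when reading back.
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder indices = new Placeholder(scope, DT_INT64);  // [nnz, R]
 *  Placeholder values  = new Placeholder(scope, DT_FLOAT);  // [nnz]
 *  Placeholder shape   = new Placeholder(scope, DT_INT64);  // [R]
 *  AddSparseToTensorsMap add = new AddSparseToTensorsMap(scope,
 *      indices.asInput(), values.asInput(), shape.asInput(),
 *      AddSparseToTensorsMap.SharedName("my_sparse_map"));
 *  Output handle = add.sparse_handle();  // 0-D int64 key into the shared map
 *  }</pre>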
*/ @Namespace("tensorflow::ops") @NoOffset public static class AddSparseToTensorsMap extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AddSparseToTensorsMap(Pointer p) { super(p); } /** Optional attribute setters for AddSparseToTensorsMap */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The container name for the {@code SparseTensorsMap} created by this op. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** The shared name for the {@code SparseTensorsMap} created by this op. * If blank, the new Operation's unique name is used. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public AddSparseToTensorsMap(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape); public AddSparseToTensorsMap(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native AddSparseToTensorsMap operation(Operation operation); public native @ByRef Output sparse_handle(); public native AddSparseToTensorsMap sparse_handle(Output sparse_handle); } /** Deserialize and concatenate {@code SparseTensors} from a serialized minibatch. * * The input {@code serialized_sparse} must be a string matrix of shape {@code [N x 3]} where * {@code N} is the minibatch size and the rows correspond to packed outputs of * {@code SerializeSparse}. The ranks of the original {@code SparseTensor} objects * must all match. 
When the final {@code SparseTensor} is created, it has rank one * higher than the ranks of the incoming {@code SparseTensor} objects * (they have been concatenated along a new row dimension). * * The output {@code SparseTensor} object's shape values for all dimensions but the * first are the max across the input {@code SparseTensor} objects' shape values * for the corresponding dimensions. Its first shape value is {@code N}, the minibatch * size. * * The input {@code SparseTensor} objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run {@code SparseReorder} to restore index ordering. * * For example, if the serialized input is a {@code [2 x 3]} matrix representing two * original {@code SparseTensor} objects: * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] * * and * * index = [ 2] * [10] * values = [4, 5] * shape = [30] * * then the final deserialized {@code SparseTensor} will be: * * index = [0 0] * [0 10] * [0 20] * [1 2] * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] * * Arguments: * * scope: A Scope object * * serialized_sparse: 2-D, The {@code N} serialized {@code SparseTensor} objects. * Must have 3 columns. * * dtype: The {@code dtype} of the serialized {@code SparseTensor} objects. * * Returns: * * {@code Output} sparse_indices * * {@code Output} sparse_values * * {@code Output} sparse_shape */ @Namespace("tensorflow::ops") @NoOffset public static class DeserializeManySparse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeserializeManySparse(Pointer p) { super(p); } public DeserializeManySparse(@Const @ByRef Scope scope, @ByVal Input serialized_sparse, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, serialized_sparse, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized_sparse, @Cast("tensorflow::DataType") int dtype); public native @ByRef Operation operation(); public native DeserializeManySparse operation(Operation operation); public native @ByRef Output sparse_indices(); public native DeserializeManySparse sparse_indices(Output sparse_indices); public native @ByRef Output sparse_values(); public native DeserializeManySparse sparse_values(Output sparse_values); public native @ByRef Output sparse_shape(); public native DeserializeManySparse sparse_shape(Output sparse_shape); } /** Deserialize {@code SparseTensor} objects. * * The input {@code serialized_sparse} must have the shape {@code [?, ?, ..., ?, 3]} where * the last dimension stores serialized {@code SparseTensor} objects and the other N * dimensions (N >= 0) correspond to a batch. The ranks of the original * {@code SparseTensor} objects must all match. When the final {@code SparseTensor} is * created, its rank is the rank of the incoming {@code SparseTensor} objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. * * The output {@code SparseTensor} object's shape values for the original dimensions * are the max across the input {@code SparseTensor} objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. * * The input {@code SparseTensor} objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run {@code SparseReorder} to restore index ordering. 
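 *
 *  A call sketch (illustrative: the {@code Placeholder} feeding the serialized strings
 *  is an assumption; the worked data example below shows the tensors this produces):
 *  <pre>{@code
 *  Scope scope = Scope.NewRootScope();
 *  Placeholder serialized = new Placeholder(scope, DT_STRING);  // shape [..., 3]
 *  DeserializeSparse des = new DeserializeSparse(scope, serialized.asInput(), DT_FLOAT);
 *  Output indices = des.sparse_indices();
 *  Output values  = des.sparse_values();
 *  Output shape   = des.sparse_shape();
 *  }</pre>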
* * For example, if the serialized input is a {@code [2 x 3]} matrix representing two * original {@code SparseTensor} objects: * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] * * and * * index = [ 2] * [10] * values = [4, 5] * shape = [30] * * then the final deserialized {@code SparseTensor} will be: * * index = [0 0] * [0 10] * [0 20] * [1 2] * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] * * Arguments: * * scope: A Scope object * * serialized_sparse: The serialized {@code SparseTensor} objects. The last dimension * must have 3 columns. * * dtype: The {@code dtype} of the serialized {@code SparseTensor} objects. * * Returns: * * {@code Output} sparse_indices * * {@code Output} sparse_values * * {@code Output} sparse_shape */ @Namespace("tensorflow::ops") @NoOffset public static class DeserializeSparse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DeserializeSparse(Pointer p) { super(p); } public DeserializeSparse(@Const @ByRef Scope scope, @ByVal Input serialized_sparse, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, serialized_sparse, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input serialized_sparse, @Cast("tensorflow::DataType") int dtype); public native @ByRef Operation operation(); public native DeserializeSparse operation(Operation operation); public native @ByRef Output sparse_indices(); public native DeserializeSparse sparse_indices(Output sparse_indices); public native @ByRef Output sparse_values(); public native DeserializeSparse sparse_values(Output sparse_values); public native @ByRef Output sparse_shape(); public native DeserializeSparse sparse_shape(Output sparse_shape); } /** Serialize an {@code N}-minibatch {@code SparseTensor} into an {@code [N, 3]} {@code Tensor} object. * * The {@code SparseTensor} must have rank {@code R} greater than 1, and the first dimension * is treated as the minibatch dimension. Elements of the {@code SparseTensor} * must be sorted in increasing order of this first dimension. The serialized * {@code SparseTensor} objects going into each row of {@code serialized_sparse} will have * rank {@code R-1}. * * The minibatch size {@code N} is extracted from {@code sparse_shape[0]}. * * Arguments: * * scope: A Scope object * * sparse_indices: 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * * sparse_values: 1-D. The {@code values} of the minibatch {@code SparseTensor}. * * sparse_shape: 1-D. The {@code shape} of the minibatch {@code SparseTensor}. * * Optional attributes (see {@code Attrs}): * * out_type: The {@code dtype} to use for serialization; the supported types are {@code string} * (default) and {@code variant}. * * Returns: * * {@code Output}: The serialized_sparse tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SerializeManySparse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SerializeManySparse(Pointer p) { super(p); } /** Optional attribute setters for SerializeManySparse */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The {@code dtype} to use for serialization; the supported types are {@code string} * (default) and {@code variant}. * * Defaults to DT_STRING */ public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); } public SerializeManySparse(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape); public SerializeManySparse(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native SerializeManySparse operation(Operation operation); public native @ByRef Output serialized_sparse(); public native SerializeManySparse serialized_sparse(Output serialized_sparse); } /** Serialize a {@code SparseTensor} into a {@code [3]} {@code Tensor} object. * * Arguments: * * scope: A Scope object * * sparse_indices: 2-D. The {@code indices} of the {@code SparseTensor}. * * sparse_values: 1-D. The {@code values} of the {@code SparseTensor}. * * sparse_shape: 1-D. The {@code shape} of the {@code SparseTensor}. * * Optional attributes (see {@code Attrs}): * * out_type: The {@code dtype} to use for serialization; the supported types are {@code string} * (default) and {@code variant}. * * Returns: * * {@code Output}: The serialized_sparse tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SerializeSparse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SerializeSparse(Pointer p) { super(p); } /** Optional attribute setters for SerializeSparse */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The {@code dtype} to use for serialization; the supported types are {@code string} * (default) and {@code variant}. 
* * Defaults to DT_STRING */ public native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @Cast("tensorflow::DataType") int out_type_(); public native Attrs out_type_(int out_type_); } public SerializeSparse(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape); public SerializeSparse(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, sparse_indices, sparse_values, sparse_shape, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input sparse_values, @ByVal Input sparse_shape, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs OutType(@Cast("tensorflow::DataType") int x); public native @ByRef Operation operation(); public native SerializeSparse operation(Operation operation); public native @ByRef Output serialized_sparse(); public native SerializeSparse serialized_sparse(Output serialized_sparse); } /** Adds two {@code SparseTensor} objects to produce another {@code SparseTensor}. * * The input {@code SparseTensor} objects' indices are assumed ordered in standard * lexicographic order. If this is not the case, before this step run * {@code SparseReorder} to restore index ordering. * * By default, if two values sum to zero at some index, the output {@code SparseTensor} * would still include that particular location in its index, storing a zero in the * corresponding value slot. To override this, callers can specify {@code thresh}, * indicating that if the sum has a magnitude strictly smaller than {@code thresh}, its * corresponding value and index would then not be included. In particular, * {@code thresh == 0} (default) means everything is kept and actual thresholding happens * only for a positive value. * * In the following shapes, {@code nnz} is the count after taking {@code thresh} into account. * * Arguments: * * scope: A Scope object * * a_indices: 2-D. The {@code indices} of the first {@code SparseTensor}, size {@code [nnz, ndims]} Matrix. * * a_values: 1-D. The {@code values} of the first {@code SparseTensor}, size {@code [nnz]} Vector. * * a_shape: 1-D. The {@code shape} of the first {@code SparseTensor}, size {@code [ndims]} Vector. * * b_indices: 2-D. The {@code indices} of the second {@code SparseTensor}, size {@code [nnz, ndims]} Matrix. * * b_values: 1-D. The {@code values} of the second {@code SparseTensor}, size {@code [nnz]} Vector. * * b_shape: 1-D. The {@code shape} of the second {@code SparseTensor}, size {@code [ndims]} Vector. * * thresh: 0-D. The magnitude threshold that determines if an output value/index * pair takes space. * * Returns: * * {@code Output} sum_indices * * {@code Output} sum_values * * {@code Output} sum_shape */ @Namespace("tensorflow::ops") @NoOffset public static class SparseAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public SparseAdd(Pointer p) { super(p); } public SparseAdd(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b_indices, @ByVal Input b_values, @ByVal Input b_shape, @ByVal Input thresh) { super((Pointer)null); allocate(scope, a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b_indices, @ByVal Input b_values, @ByVal Input b_shape, @ByVal Input thresh); public native @ByRef Operation operation(); public native SparseAdd operation(Operation operation); public native @ByRef Output sum_indices(); public native SparseAdd sum_indices(Output sum_indices); public native @ByRef Output sum_values(); public native SparseAdd sum_values(Output sum_values); public native @ByRef Output sum_shape(); public native SparseAdd sum_shape(Output sum_shape); } /** The gradient operator for the SparseAdd op. * * The SparseAdd op calculates A + B, where A, B, and the sum are all represented * as {@code SparseTensor} objects. This op takes in the upstream gradient w.r.t. * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. * * Arguments: * * scope: A Scope object * * backprop_val_grad: 1-D with shape {@code [nnz(sum)]}. The gradient with respect to * the non-empty values of the sum. * * a_indices: 2-D. The {@code indices} of the {@code SparseTensor} A, size {@code [nnz(A), ndims]}. * * b_indices: 2-D. The {@code indices} of the {@code SparseTensor} B, size {@code [nnz(B), ndims]}. * * sum_indices: 2-D. The {@code indices} of the sum {@code SparseTensor}, size * {@code [nnz(sum), ndims]}. * * Returns: * * {@code Output} a_val_grad: 1-D with shape {@code [nnz(A)]}. The gradient with respect to the * non-empty values of A. * * {@code Output} b_val_grad: 1-D with shape {@code [nnz(B)]}. The gradient with respect to the * non-empty values of B. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseAddGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseAddGrad(Pointer p) { super(p); } public SparseAddGrad(@Const @ByRef Scope scope, @ByVal Input backprop_val_grad, @ByVal Input a_indices, @ByVal Input b_indices, @ByVal Input sum_indices) { super((Pointer)null); allocate(scope, backprop_val_grad, a_indices, b_indices, sum_indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input backprop_val_grad, @ByVal Input a_indices, @ByVal Input b_indices, @ByVal Input sum_indices); public native @ByRef Operation operation(); public native SparseAddGrad operation(Operation operation); public native @ByRef Output a_val_grad(); public native SparseAddGrad a_val_grad(Output a_val_grad); public native @ByRef Output b_val_grad(); public native SparseAddGrad b_val_grad(Output b_val_grad); } /** Concatenates a list of {@code SparseTensor} along the specified dimension. * * Concatenation is with respect to the dense versions of these sparse tensors. * It is assumed that each input is a {@code SparseTensor} whose elements are ordered * along increasing dimension number. * * All inputs' shapes must match, except for the concat dimension. The * {@code indices}, {@code values}, and {@code shapes} lists must have the same length. 
* * The output shape is identical to the inputs', except along the concat * dimension, where it is the sum of the inputs' sizes along that dimension. * * The output elements will be resorted to preserve the sort order along * increasing dimension number. * * This op runs in {@code O(M log M)} time, where {@code M} is the total number of non-empty * values across all inputs. This is due to the need for an internal sort in * order to concatenate efficiently across an arbitrary dimension. * * For example, if {@code concat_dim = 1} and the inputs are * * sp_inputs[0]: shape = [2, 3] * [0, 2]: "a" * [1, 0]: "b" * [1, 1]: "c" * * sp_inputs[1]: shape = [2, 4] * [0, 1]: "d" * [0, 2]: "e" * * then the output will be * * shape = [2, 7] * [0, 2]: "a" * [0, 4]: "d" * [0, 5]: "e" * [1, 0]: "b" * [1, 1]: "c" * * Graphically this is equivalent to doing * * [ a] concat [ d e ] = [ a d e ] * [b c ] [ ] [b c ] * * Arguments: * * scope: A Scope object * * indices: 2-D. Indices of each input {@code SparseTensor}. * * values: 1-D. Non-empty values of each {@code SparseTensor}. * * shapes: 1-D. Shapes of each {@code SparseTensor}. * * concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), * where rank is the number of dimensions in each input {@code SparseTensor}. * * Returns: * * {@code Output} output_indices: 2-D. Indices of the concatenated {@code SparseTensor}. * * {@code Output} output_values: 1-D. Non-empty values of the concatenated {@code SparseTensor}. * * {@code Output} output_shape: 1-D. Shape of the concatenated {@code SparseTensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseConcat extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseConcat(Pointer p) { super(p); } public SparseConcat(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList values, @ByVal InputList shapes, @Cast("tensorflow::int64") long concat_dim) { super((Pointer)null); allocate(scope, indices, values, shapes, concat_dim); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList values, @ByVal InputList shapes, @Cast("tensorflow::int64") long concat_dim); public native @ByRef Operation operation(); public native SparseConcat operation(Operation operation); public native @ByRef Output output_indices(); public native SparseConcat output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseConcat output_values(Output output_values); public native @ByRef Output output_shape(); public native SparseConcat output_shape(Output output_shape); } /** Generates sparse cross from a list of sparse and dense tensors. * * The op takes two lists, one of 2D {@code SparseTensor} and one of 2D {@code Tensor}, each * representing features of one feature column. It outputs a 2D {@code SparseTensor} with * the batchwise crosses of these features. 
* * For example, if the inputs are * * inputs[0]: SparseTensor with shape = [2, 2] * [0, 0]: "a" * [1, 0]: "b" * [1, 1]: "c" * * inputs[1]: SparseTensor with shape = [2, 1] * [0, 0]: "d" * [1, 0]: "e" * * inputs[2]: Tensor [["f"], ["g"]] * * then the output will be * * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" * * if hashed_output=true then the output will be * * shape = [2, 2] * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( * Fingerprint64("d"), Fingerprint64("a"))) * [1, 0]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("b"))) * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) * * Arguments: * * scope: A Scope object * * indices: 2-D. Indices of each input {@code SparseTensor}. * * values: 1-D. Values of each {@code SparseTensor}. * * shapes: 1-D. Shapes of each {@code SparseTensor}. * * dense_inputs: 2-D. Columns represented by dense {@code Tensor}. * * hashed_output: If true, returns the hash of the cross instead of the string. * This allows us to avoid string manipulations. * * num_buckets: Used only if hashed_output is true. * output = hashed_value % num_buckets if num_buckets > 0, else hashed_value. * * hash_key: Specify the hash_key that will be used by the {@code FingerprintCat64} * function to combine the crosses' fingerprints. * * Returns: * * {@code Output} output_indices: 2-D. Indices of the concatenated {@code SparseTensor}. * * {@code Output} output_values: 1-D. Non-empty values of the concatenated or hashed * {@code SparseTensor}. * * {@code Output} output_shape: 1-D. Shape of the concatenated {@code SparseTensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseCross extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public SparseCross(Pointer p) { super(p); } public SparseCross(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList values, @ByVal InputList shapes, @ByVal InputList dense_inputs, @Cast("bool") boolean hashed_output, @Cast("tensorflow::int64") long num_buckets, @Cast("tensorflow::int64") long hash_key, @Cast("tensorflow::DataType") int out_type, @Cast("tensorflow::DataType") int internal_type) { super((Pointer)null); allocate(scope, indices, values, shapes, dense_inputs, hashed_output, num_buckets, hash_key, out_type, internal_type); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList indices, @ByVal InputList values, @ByVal InputList shapes, @ByVal InputList dense_inputs, @Cast("bool") boolean hashed_output, @Cast("tensorflow::int64") long num_buckets, @Cast("tensorflow::int64") long hash_key, @Cast("tensorflow::DataType") int out_type, @Cast("tensorflow::DataType") int internal_type); public native @ByRef Operation operation(); public native SparseCross operation(Operation operation); public native @ByRef Output output_indices(); public native SparseCross output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseCross output_values(Output output_values); public native @ByRef Output output_shape(); public native SparseCross output_shape(Output output_shape); } /** Adds up a SparseTensor and a dense Tensor, using these special rules: * * (1) Broadcasts the dense side to have the same shape as the sparse side, if * eligible; * (2) Then, only the dense values pointed to by the indices of the SparseTensor * participate in the cwise addition. * * By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. * * Arguments: * * scope: A Scope object * * sp_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * sp_values: 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. * * sp_shape: 1-D. Shape of the input SparseTensor. * * dense: {@code R}-D. The dense Tensor operand. * * Returns: * * {@code Output}: 1-D. The {@code N} values that are operated on. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseDenseCwiseAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseDenseCwiseAdd(Pointer p) { super(p); } public SparseDenseCwiseAdd(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape, @ByVal Input dense) { super((Pointer)null); allocate(scope, sp_indices, sp_values, sp_shape, dense); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape, @ByVal Input dense); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SparseDenseCwiseAdd operation(Operation operation); public native @ByRef Output output(); public native SparseDenseCwiseAdd output(Output output); } /** Component-wise divides a SparseTensor by a dense Tensor. * * *Limitation*: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * * Arguments: * * scope: A Scope object * * sp_indices: 2-D. 
{@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * sp_values: 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. * * sp_shape: 1-D. Shape of the input SparseTensor. * * dense: {@code R}-D. The dense Tensor operand. * * Returns: * * {@code Output}: 1-D. The {@code N} values that are operated on. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseDenseCwiseDiv extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseDenseCwiseDiv(Pointer p) { super(p); } public SparseDenseCwiseDiv(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape, @ByVal Input dense) { super((Pointer)null); allocate(scope, sp_indices, sp_values, sp_shape, dense); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape, @ByVal Input dense); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SparseDenseCwiseDiv operation(Operation operation); public native @ByRef Output output(); public native SparseDenseCwiseDiv output(Output output); } /** Component-wise multiplies a SparseTensor by a dense Tensor. * * The output locations corresponding to the implicitly zero elements in the sparse * tensor will be zero (i.e., will not take up storage space), regardless of the * contents of the dense tensor (even if it is +/-INF, and even though INF*0 == NaN). * * *Limitation*: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * * Arguments: * * scope: A Scope object * * sp_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * sp_values: 1-D. {@code N} non-empty values corresponding to {@code sp_indices}. * * sp_shape: 1-D. Shape of the input SparseTensor. * * dense: {@code R}-D. The dense Tensor operand. * * Returns: * * {@code Output}: 1-D. The {@code N} values that are operated on. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseDenseCwiseMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseDenseCwiseMul(Pointer p) { super(p); } public SparseDenseCwiseMul(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape, @ByVal Input dense) { super((Pointer)null); allocate(scope, sp_indices, sp_values, sp_shape, dense); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape, @ByVal Input dense); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SparseDenseCwiseMul operation(Operation operation); public native @ByRef Output output(); public native SparseDenseCwiseMul output(Output output); } /** Fills empty rows in the input 2-D {@code SparseTensor} with a default value. * * The input {@code SparseTensor} is represented via the tuple of inputs * ({@code indices}, {@code values}, {@code dense_shape}). 
The output {@code SparseTensor} has the * same {@code dense_shape} but with indices {@code output_indices} and values * {@code output_values}. * * This op inserts a single entry for every row that doesn't have any values. * The index is created as {@code [row, 0, ..., 0]} and the inserted value * is {@code default_value}. * * For example, suppose {@code sp_input} has shape {@code [5, 6]} and non-empty values: * * [0, 1]: a * [0, 3]: b * [2, 0]: c * [3, 1]: d * * Rows 1 and 4 are empty, so the output will be of shape {@code [5, 6]} with values: * * [0, 1]: a * [0, 3]: b * [1, 0]: default_value * [2, 0]: c * [3, 1]: d * [4, 0]: default_value * * The output {@code SparseTensor} will be in row-major order and will have the * same shape as the input. * * This op also returns an indicator vector shaped {@code [dense_shape[0]]} such that * * empty_row_indicator[i] = True iff row i was an empty row. * * And a reverse index map vector shaped {@code [indices.shape[0]]} that is used during * backpropagation, * * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] * * Arguments: * * scope: A Scope object * * indices: 2-D. the indices of the sparse tensor. * * values: 1-D. the values of the sparse tensor. * * dense_shape: 1-D. the shape of the sparse tensor. * * default_value: 0-D. default value to insert into location {@code [row, 0, ..., 0]} * for rows missing from the input sparse tensor. * * Returns: * * {@code Output} output_indices: 2-D. the indices of the filled sparse tensor. * * {@code Output} output_values: 1-D. the values of the filled sparse tensor. * * {@code Output} empty_row_indicator: 1-D. whether the dense row was missing in the * input sparse tensor. * * {@code Output} reverse_index_map: 1-D. a map from the input indices to the output indices. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseFillEmptyRows extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseFillEmptyRows(Pointer p) { super(p); } public SparseFillEmptyRows(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input values, @ByVal Input dense_shape, @ByVal Input default_value) { super((Pointer)null); allocate(scope, indices, values, dense_shape, default_value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input values, @ByVal Input dense_shape, @ByVal Input default_value); public native @ByRef Operation operation(); public native SparseFillEmptyRows operation(Operation operation); public native @ByRef Output output_indices(); public native SparseFillEmptyRows output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseFillEmptyRows output_values(Output output_values); public native @ByRef Output empty_row_indicator(); public native SparseFillEmptyRows empty_row_indicator(Output empty_row_indicator); public native @ByRef Output reverse_index_map(); public native SparseFillEmptyRows reverse_index_map(Output reverse_index_map); } /** The gradient of SparseFillEmptyRows. * * Takes vectors reverse_index_map, shaped {@code [N]}, and grad_values, * shaped {@code [N_full]}, where {@code N_full >= N}, and copies data into either * {@code d_values} or {@code d_default_value}. Here {@code d_values} is shaped {@code [N]} and * {@code d_default_value} is a scalar. * * d_values[j] = grad_values[reverse_index_map[j]] * d_default_value = sum_{k : 0 .. 
N_full - 1} ( * grad_values[k] * 1{k not in reverse_index_map}) * * Arguments: * * scope: A Scope object * * reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows. * * grad_values: 1-D. The gradients from backprop. * * Returns: * * {@code Output} d_values: 1-D. The backprop into values. * * {@code Output} d_default_value: 0-D. The backprop into default_value. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseFillEmptyRowsGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseFillEmptyRowsGrad(Pointer p) { super(p); } public SparseFillEmptyRowsGrad(@Const @ByRef Scope scope, @ByVal Input reverse_index_map, @ByVal Input grad_values) { super((Pointer)null); allocate(scope, reverse_index_map, grad_values); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input reverse_index_map, @ByVal Input grad_values); public native @ByRef Operation operation(); public native SparseFillEmptyRowsGrad operation(Operation operation); public native @ByRef Output d_values(); public native SparseFillEmptyRowsGrad d_values(Output d_values); public native @ByRef Output d_default_value(); public native SparseFillEmptyRowsGrad d_default_value(Output d_default_value); } /** Computes the max of elements across dimensions of a SparseTensor. * * This Op takes a SparseTensor and is the sparse counterpart to * {@code tf.reduce_max()}. In particular, this Op also returns a dense {@code Tensor} * instead of a sparse one. * * Reduces {@code sp_input} along the dimensions given in {@code reduction_axes}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code reduction_axes}. If {@code keep_dims} is true, the reduced dimensions are retained * with length 1. * * If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * * Arguments: * * scope: A Scope object * * input_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * input_values: 1-D. {@code N} non-empty values corresponding to {@code input_indices}. * * input_shape: 1-D. Shape of the input SparseTensor. * * reduction_axes: 1-D. Length-{@code K} vector containing the reduction axes. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: {@code R-K}-D. The reduced Tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseReduceMax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseReduceMax(Pointer p) { super(p); } /** Optional attribute setters for SparseReduceMax */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public SparseReduceMax(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes); public SparseReduceMax(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseReduceMax operation(Operation operation); public native @ByRef Output output(); public native SparseReduceMax output(Output output); } /** Computes the max of elements across dimensions of a SparseTensor. * * This Op takes a SparseTensor and is the sparse counterpart to * {@code tf.reduce_max()}. In contrast to SparseReduceMax, this Op returns a * SparseTensor. * * Reduces {@code sp_input} along the dimensions given in {@code reduction_axes}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code reduction_axes}. If {@code keep_dims} is true, the reduced dimensions are retained * with length 1. * * If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * * Arguments: * * scope: A Scope object * * input_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * input_values: 1-D. {@code N} non-empty values corresponding to {@code input_indices}. * * input_shape: 1-D. Shape of the input SparseTensor. * * reduction_axes: 1-D. Length-{@code K} vector containing the reduction axes. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output} output_indices * * {@code Output} output_values * * {@code Output} output_shape */ @Namespace("tensorflow::ops") @NoOffset public static class SparseReduceMaxSparse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public SparseReduceMaxSparse(Pointer p) { super(p); } /** Optional attribute setters for SparseReduceMaxSparse */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public SparseReduceMaxSparse(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes); public SparseReduceMaxSparse(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseReduceMaxSparse operation(Operation operation); public native @ByRef Output output_indices(); public native SparseReduceMaxSparse output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseReduceMaxSparse output_values(Output output_values); public native @ByRef Output output_shape(); public native SparseReduceMaxSparse output_shape(Output output_shape); } /** Computes the sum of elements across dimensions of a SparseTensor. * * This Op takes a SparseTensor and is the sparse counterpart to * {@code tf.reduce_sum()}. In particular, this Op also returns a dense {@code Tensor} * instead of a sparse one. * * Reduces {@code sp_input} along the dimensions given in {@code reduction_axes}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code reduction_axes}. If {@code keep_dims} is true, the reduced dimensions are retained * with length 1. * * If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * * Arguments: * * scope: A Scope object * * input_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * input_values: 1-D. {@code N} non-empty values corresponding to {@code input_indices}. * * input_shape: 1-D. Shape of the input SparseTensor. * * reduction_axes: 1-D. 
Length-{@code K} vector containing the reduction axes. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output}: {@code R-K}-D. The reduced Tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseReduceSum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseReduceSum(Pointer p) { super(p); } /** Optional attribute setters for SparseReduceSum */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public SparseReduceSum(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes); public SparseReduceSum(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseReduceSum operation(Operation operation); public native @ByRef Output output(); public native SparseReduceSum output(Output output); } /** Computes the sum of elements across dimensions of a SparseTensor. * * This Op takes a SparseTensor and is the sparse counterpart to * {@code tf.reduce_sum()}. In contrast to SparseReduceSum, this Op returns a * SparseTensor. * * Reduces {@code sp_input} along the dimensions given in {@code reduction_axes}. Unless * {@code keep_dims} is true, the rank of the tensor is reduced by 1 for each entry in * {@code reduction_axes}. If {@code keep_dims} is true, the reduced dimensions are retained * with length 1. * * If {@code reduction_axes} has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * * Arguments: * * scope: A Scope object * * input_indices: 2-D. 
{@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * input_values: 1-D. {@code N} non-empty values corresponding to {@code input_indices}. * * input_shape: 1-D. Shape of the input SparseTensor. * * reduction_axes: 1-D. Length-{@code K} vector containing the reduction axes. * * Optional attributes (see {@code Attrs}): * * keep_dims: If true, retain reduced dimensions with length 1. * * Returns: * * {@code Output} output_indices * * {@code Output} output_values * * {@code Output} output_shape */ @Namespace("tensorflow::ops") @NoOffset public static class SparseReduceSumSparse extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseReduceSumSparse(Pointer p) { super(p); } /** Optional attribute setters for SparseReduceSumSparse */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, retain reduced dimensions with length 1. * * Defaults to false */ public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); } public SparseReduceSumSparse(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes); public SparseReduceSumSparse(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape, reduction_axes, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape, @ByVal Input reduction_axes, @Const @ByRef Attrs attrs); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseReduceSumSparse operation(Operation operation); public native @ByRef Output output_indices(); public native SparseReduceSumSparse output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseReduceSumSparse output_values(Output output_values); public native @ByRef Output output_shape(); public native SparseReduceSumSparse output_shape(Output output_shape); } /** Reorders a SparseTensor into the canonical, row-major ordering. * * Note that by convention, all sparse ops preserve the canonical ordering along * increasing dimension number. The only time ordering can be violated is during * manual manipulation of the indices and values vectors to add entries. * * Reordering does not affect the shape of the SparseTensor. 
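 *
 *  For example (an illustrative sketch, not from the original op documentation): input indices
 *  {@code [[0, 3], [2, 1], [0, 1]]} are returned as {@code [[0, 1], [0, 3], [2, 1]]}, with
 *  {@code output_values} permuted the same way.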
* * If the tensor has rank {@code R} and {@code N} non-empty values, {@code input_indices} has * shape {@code [N, R]}, input_values has length {@code N}, and input_shape has length {@code R}. * * Arguments: * * scope: A Scope object * * input_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. * * input_values: 1-D. {@code N} non-empty values corresponding to {@code input_indices}. * * input_shape: 1-D. Shape of the input SparseTensor. * * Returns: * * {@code Output} output_indices: 2-D. {@code N x R} matrix with the same indices as input_indices, but * in canonical row-major ordering. * * {@code Output} output_values: 1-D. {@code N} non-empty values corresponding to {@code output_indices}. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseReorder extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseReorder(Pointer p) { super(p); } public SparseReorder(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape) { super((Pointer)null); allocate(scope, input_indices, input_values, input_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_values, @ByVal Input input_shape); public native @ByRef Operation operation(); public native SparseReorder operation(Operation operation); public native @ByRef Output output_indices(); public native SparseReorder output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseReorder output_values(Output output_values); } /** Reshapes a SparseTensor to represent values in a new dense shape. * * This operation has the same semantics as reshape on the represented dense * tensor. The {@code input_indices} are recomputed based on the requested {@code new_shape}. * * If one component of {@code new_shape} is the special value -1, the size of that * dimension is computed so that the total dense size remains constant. At * most one component of {@code new_shape} can be -1. The number of dense elements * implied by {@code new_shape} must be the same as the number of dense elements * originally implied by {@code input_shape}. * * Reshaping does not affect the order of values in the SparseTensor. * * If the input tensor has rank {@code R_in} and {@code N} non-empty values, and {@code new_shape} * has length {@code R_out}, then {@code input_indices} has shape {@code [N, R_in]}, * {@code input_shape} has length {@code R_in}, {@code output_indices} has shape {@code [N, R_out]}, and * {@code output_shape} has length {@code R_out}. * * Arguments: * * scope: A Scope object * * input_indices: 2-D. {@code N x R_in} matrix with the indices of non-empty values in a * SparseTensor. * * input_shape: 1-D. {@code R_in} vector with the input SparseTensor's dense shape. * * new_shape: 1-D. {@code R_out} vector with the requested new dense shape. * * Returns: * * {@code Output} output_indices: 2-D. {@code N x R_out} matrix with the updated indices of non-empty * values in the output SparseTensor. * * {@code Output} output_shape: 1-D. {@code R_out} vector with the full dense shape of the output * SparseTensor. This is the same as {@code new_shape} but with any -1 dimensions * filled in. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseReshape extends Pointer { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseReshape(Pointer p) { super(p); } public SparseReshape(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_shape, @ByVal Input new_shape) { super((Pointer)null); allocate(scope, input_indices, input_shape, new_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input_indices, @ByVal Input input_shape, @ByVal Input new_shape); public native @ByRef Operation operation(); public native SparseReshape operation(Operation operation); public native @ByRef Output output_indices(); public native SparseReshape output_indices(Output output_indices); public native @ByRef Output output_shape(); public native SparseReshape output_shape(Output output_shape); } /** Slice a {@code SparseTensor} based on the {@code start} and {@code size}. * * For example, if the input is * * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] * * Graphically the output tensors are: * * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] * [ a ] * [b c ] * * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] * [ d e ] * [ ] * * Arguments: * * scope: A Scope object * * indices: 2-D tensor represents the indices of the sparse tensor. * * values: 1-D tensor represents the values of the sparse tensor. * * shape: 1-D. tensor represents the shape of the sparse tensor. * * start: 1-D. tensor represents the start of the slice. * * size: 1-D. tensor represents the size of the slice. * output indices: A list of 1-D tensors represents the indices of the output * sparse tensors. * * Returns: * * {@code Output} output_indices * * {@code Output} output_values: A list of 1-D tensors represents the values of the output sparse * tensors. * * {@code Output} output_shape: A list of 1-D tensors represents the shape of the output sparse * tensors. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseSlice extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseSlice(Pointer p) { super(p); } public SparseSlice(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input values, @ByVal Input shape, @ByVal Input start, @ByVal Input size) { super((Pointer)null); allocate(scope, indices, values, shape, start, size); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input indices, @ByVal Input values, @ByVal Input shape, @ByVal Input start, @ByVal Input size); public native @ByRef Operation operation(); public native SparseSlice operation(Operation operation); public native @ByRef Output output_indices(); public native SparseSlice output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseSlice output_values(Output output_values); public native @ByRef Output output_shape(); public native SparseSlice output_shape(Output output_shape); } /** The gradient operator for the SparseSlice op. * * This op takes in the upstream gradient w.r.t. non-empty values of * the sliced {@code SparseTensor}, and outputs the gradients w.r.t. * the non-empty values of input {@code SparseTensor}. * * Arguments: * * scope: A Scope object * * backprop_val_grad: 1-D. The gradient with respect to * the non-empty values of the sliced {@code SparseTensor}. * * input_indices: 2-D. The {@code indices} of the input {@code SparseTensor}. * * input_start: 1-D. tensor represents the start of the slice. * * output_indices: 2-D. The {@code indices} of the sliced {@code SparseTensor}. * * Returns: * * {@code Output}: 1-D. 
The gradient with respect to the non-empty values of input {@code SparseTensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseSliceGrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseSliceGrad(Pointer p) { super(p); } public SparseSliceGrad(@Const @ByRef Scope scope, @ByVal Input backprop_val_grad, @ByVal Input input_indices, @ByVal Input input_start, @ByVal Input output_indices) { super((Pointer)null); allocate(scope, backprop_val_grad, input_indices, input_start, output_indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input backprop_val_grad, @ByVal Input input_indices, @ByVal Input input_start, @ByVal Input output_indices); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SparseSliceGrad operation(Operation operation); public native @ByRef Output val_grad(); public native SparseSliceGrad val_grad(Output val_grad); } /** Applies softmax to a batched N-D {@code SparseTensor}. * * The inputs represent an N-D SparseTensor with logical shape {@code [..., B, C]} * (where {@code N >= 2}), and with indices sorted in the canonical lexicographic order. * * This op is equivalent to applying the normal {@code tf.nn.softmax()} to each innermost * logical submatrix with shape {@code [B, C]}, but with the catch that *the implicitly * zero elements do not participate*. Specifically, the algorithm is equivalent * to the following: * * (1) Applies {@code tf.nn.softmax()} to a densified view of each innermost submatrix * with shape {@code [B, C]}, along the size-C dimension; * (2) Masks out the original implicitly-zero locations; * (3) Renormalizes the remaining elements. * * Hence, the {@code SparseTensor} result has exactly the same non-zero indices and * shape. * * Arguments: * * scope: A Scope object * * sp_indices: 2-D. {@code NNZ x R} matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. * * sp_values: 1-D. {@code NNZ} non-empty values corresponding to {@code sp_indices}. * * sp_shape: 1-D. Shape of the input SparseTensor. * * Returns: * * {@code Output}: 1-D. The {@code NNZ} values for the result {@code SparseTensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseSoftmax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseSoftmax(Pointer p) { super(p); } public SparseSoftmax(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape) { super((Pointer)null); allocate(scope, sp_indices, sp_values, sp_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sp_indices, @ByVal Input sp_values, @ByVal Input sp_shape); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SparseSoftmax operation(Operation operation); public native @ByRef Output output(); public native SparseSoftmax output(Output output); } /** Returns the element-wise max of two SparseTensors. * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * * Arguments: * * scope: A Scope object * * a_indices: 2-D. 
{@code N x R} matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. * * a_values: 1-D. {@code N} non-empty values corresponding to {@code a_indices}. * * a_shape: 1-D. Shape of the input SparseTensor. * * b_indices: counterpart to {@code a_indices} for the other operand. * * b_values: counterpart to {@code a_values} for the other operand; must be of the same dtype. * * b_shape: counterpart to {@code a_shape} for the other operand; the two shapes must be equal. * * Returns: * * {@code Output} output_indices: 2-D. The indices of the output SparseTensor. * * {@code Output} output_values: 1-D. The values of the output SparseTensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseSparseMaximum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseSparseMaximum(Pointer p) { super(p); } public SparseSparseMaximum(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b_indices, @ByVal Input b_values, @ByVal Input b_shape) { super((Pointer)null); allocate(scope, a_indices, a_values, a_shape, b_indices, b_values, b_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b_indices, @ByVal Input b_values, @ByVal Input b_shape); public native @ByRef Operation operation(); public native SparseSparseMaximum operation(Operation operation); public native @ByRef Output output_indices(); public native SparseSparseMaximum output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseSparseMaximum output_values(Output output_values); } /** Returns the element-wise min of two SparseTensors. * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * * Arguments: * * scope: A Scope object * * a_indices: 2-D. {@code N x R} matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. * * a_values: 1-D. {@code N} non-empty values corresponding to {@code a_indices}. * * a_shape: 1-D. Shape of the input SparseTensor. * * b_indices: counterpart to {@code a_indices} for the other operand. * * b_values: counterpart to {@code a_values} for the other operand; must be of the same dtype. * * b_shape: counterpart to {@code a_shape} for the other operand; the two shapes must be equal. * * Returns: * * {@code Output} output_indices: 2-D. The indices of the output SparseTensor. * * {@code Output} output_values: 1-D. The values of the output SparseTensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseSparseMinimum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
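 *
 *  A minimal construction sketch (illustrative; both operand triples are assumed to have been
 *  built beforehand):
 * <pre>{@code
 *  // element-wise min of two SparseTensors with identical shapes
 *  SparseSparseMinimum min = new SparseSparseMinimum(scope, a_indices, a_values, a_shape,
 *      b_indices, b_values, b_shape);
 *  Output minValues = min.output_values();  // values of the element-wise minimum
 *  }</pre>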
*/ public SparseSparseMinimum(Pointer p) { super(p); } public SparseSparseMinimum(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b_indices, @ByVal Input b_values, @ByVal Input b_shape) { super((Pointer)null); allocate(scope, a_indices, a_values, a_shape, b_indices, b_values, b_shape); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b_indices, @ByVal Input b_values, @ByVal Input b_shape); public native @ByRef Operation operation(); public native SparseSparseMinimum operation(Operation operation); public native @ByRef Output output_indices(); public native SparseSparseMinimum output_indices(Output output_indices); public native @ByRef Output output_values(); public native SparseSparseMinimum output_values(Output output_values); } /** Split a {@code SparseTensor} into {@code num_split} tensors along one dimension. * * If {@code shape[split_dim]} is not an integer multiple of {@code num_split}, slices * {@code [0 : shape[split_dim] % num_split]} get one extra dimension. * For example, if {@code split_dim = 1} and {@code num_split = 2} and the input is * * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] * * Graphically the output tensors are: * * output_tensor[0] = shape = [2, 4] * [ a ] * [b c ] * * output_tensor[1] = shape = [2, 3] * [ d e ] * [ ] * * Arguments: * * scope: A Scope object * * split_dim: 0-D. The dimension along which to split. Must be in the range * {@code [0, rank(shape))}. * * indices: 2-D tensor represents the indices of the sparse tensor. * * values: 1-D tensor represents the values of the sparse tensor. * * shape: 1-D. tensor represents the shape of the sparse tensor. * output indices: A list of 1-D tensors represents the indices of the output * sparse tensors. * * num_split: The number of ways to split. * * Returns: * * {@code OutputList} output_indices * * {@code OutputList} output_values: A list of 1-D tensors represents the values of the output sparse * tensors. * * {@code OutputList} output_shape: A list of 1-D tensors represents the shape of the output sparse * tensors. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseSplit extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseSplit(Pointer p) { super(p); } public SparseSplit(@Const @ByRef Scope scope, @ByVal Input split_dim, @ByVal Input indices, @ByVal Input values, @ByVal Input shape, @Cast("tensorflow::int64") long num_split) { super((Pointer)null); allocate(scope, split_dim, indices, values, shape, num_split); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input split_dim, @ByVal Input indices, @ByVal Input values, @ByVal Input shape, @Cast("tensorflow::int64") long num_split); public native @ByRef Operation operation(); public native SparseSplit operation(Operation operation); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output_indices(); public native SparseSplit output_indices(OutputVector output_indices); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output_values(); public native SparseSplit output_values(OutputVector output_values); public native @ByRef @Cast("tensorflow::OutputList*") OutputVector output_shape(); public native SparseSplit output_shape(OutputVector output_shape); } /** Adds up a {@code SparseTensor} and a dense {@code Tensor}, producing a dense {@code Tensor}. 
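 *
 *  For example (an illustrative sketch): a {@code SparseTensor} with {@code a_indices = [[0, 1]]},
 *  {@code a_values = [10]} and {@code a_shape = [2, 2]}, added to {@code b = [[1, 2], [3, 4]]},
 *  yields the dense tensor {@code [[1, 12], [3, 4]]}.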
* * This Op does not require {@code a_indices} be sorted in standard lexicographic order. * * Arguments: * * scope: A Scope object * * a_indices: 2-D. The {@code indices} of the {@code SparseTensor}, with shape {@code [nnz, ndims]}. * * a_values: 1-D. The {@code values} of the {@code SparseTensor}, with shape {@code [nnz]}. * * a_shape: 1-D. The {@code shape} of the {@code SparseTensor}, with shape {@code [ndims]}. * * b: {@code ndims}-D Tensor. With shape {@code a_shape}. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseTensorDenseAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseTensorDenseAdd(Pointer p) { super(p); } public SparseTensorDenseAdd(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b) { super((Pointer)null); allocate(scope, a_indices, a_values, a_shape, b); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native SparseTensorDenseAdd operation(Operation operation); public native @ByRef Output output(); public native SparseTensorDenseAdd output(Output output); } /** Multiply SparseTensor (of rank 2) "A" by dense matrix "B". * * No validity checking is performed on the indices of A. However, the following * input format is recommended for optimal behavior: * * if adjoint_a == false: * A should be sorted in lexicographically increasing order. Use SparseReorder * if you're not sure. * if adjoint_a == true: * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). * * Arguments: * * scope: A Scope object * * a_indices: 2-D. The {@code indices} of the {@code SparseTensor}, size {@code [nnz, 2]} Matrix. * * a_values: 1-D. The {@code values} of the {@code SparseTensor}, size {@code [nnz]} Vector. * * a_shape: 1-D. The {@code shape} of the {@code SparseTensor}, size {@code [2]} Vector. * * b: 2-D. A dense Matrix. * * Optional attributes (see {@code Attrs}): * * adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex, this * is transpose(conj(A)). Otherwise it's transpose(A). * * adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex, this * is transpose(conj(B)). Otherwise it's transpose(B). * * Returns: * * {@code Output}: The product tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseTensorDenseMatMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseTensorDenseMatMul(Pointer p) { super(p); } /** Optional attribute setters for SparseTensorDenseMatMul */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
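 *
 *  A minimal construction sketch for the enclosing op (illustrative; the sparse operand and the
 *  dense matrix {@code b} are assumed to exist):
 * <pre>{@code
 *  SparseTensorDenseMatMul mm = new SparseTensorDenseMatMul(scope, a_indices, a_values, a_shape,
 *      b, SparseTensorDenseMatMul.AdjointB(true));  // multiply A by the adjoint of B
 *  Output product = mm.product();
 *  }</pre>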
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Use the adjoint of A in the matrix multiply. If A is complex, this * is transpose(conj(A)). Otherwise it's transpose(A). * * Defaults to false */ /// public native @ByVal Attrs AdjointA(@Cast("bool") boolean x); /** Use the adjoint of B in the matrix multiply. If B is complex, this * is transpose(conj(B)). Otherwise it's transpose(B). * * Defaults to false */ public native @ByVal Attrs AdjointB(@Cast("bool") boolean x); public native @Cast("bool") boolean adjoint_a_(); public native Attrs adjoint_a_(boolean adjoint_a_); public native @Cast("bool") boolean adjoint_b_(); public native Attrs adjoint_b_(boolean adjoint_b_); } public SparseTensorDenseMatMul(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b) { super((Pointer)null); allocate(scope, a_indices, a_values, a_shape, b); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b); public SparseTensorDenseMatMul(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, a_indices, a_values, a_shape, b, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input a_indices, @ByVal Input a_values, @ByVal Input a_shape, @ByVal Input b, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs AdjointA(@Cast("bool") boolean x); public static native @ByVal Attrs AdjointB(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseTensorDenseMatMul operation(Operation operation); public native @ByRef Output product(); public native SparseTensorDenseMatMul product(Output product); } /** Converts a sparse representation into a dense tensor. * * Builds an array {@code dense} with shape {@code output_shape} such that * *
{@code
 *  # If sparse_indices is scalar
 *  dense[i] = (i == sparse_indices ? sparse_values : default_value)
 * 
 *  # If sparse_indices is a vector, then for each i
 *  dense[sparse_indices[i]] = sparse_values[i]
 * 
 *  # If sparse_indices is an n by d matrix, then for each i in [0, n)
 *  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
 *  }
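 *
 *  For example (an illustrative sketch): {@code sparse_indices = [[0, 0], [1, 2]]},
 *  {@code output_shape = [2, 3]}, {@code sparse_values = [5, 6]} and {@code default_value = 0}
 *  produce the dense tensor {@code [[5, 0, 0], [0, 0, 6]]}.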
* * All other values in {@code dense} are set to {@code default_value}. If {@code sparse_values} is a * scalar, all sparse indices are set to this single value. * * Indices should be sorted in lexicographic order, and indices must not * contain any repeats. If {@code validate_indices} is true, these properties * are checked during execution. * * Arguments: * * scope: A Scope object * * sparse_indices: 0-D, 1-D, or 2-D. {@code sparse_indices[i]} contains the complete * index where {@code sparse_values[i]} will be placed. * * output_shape: 1-D. Shape of the dense output tensor. * * sparse_values: 1-D. Values corresponding to each row of {@code sparse_indices}, * or a scalar value to be used for all sparse indices. * * default_value: Scalar value to set for indices not specified in * {@code sparse_indices}. * * Optional attributes (see {@code Attrs}): * * validate_indices: If true, indices are checked to make sure they are sorted in * lexicographic order and that there are no repeats. * * Returns: * * {@code Output}: Dense output tensor of shape {@code output_shape}. */ @Namespace("tensorflow::ops") @NoOffset public static class SparseToDense extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseToDense(Pointer p) { super(p); } /** Optional attribute setters for SparseToDense */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, indices are checked to make sure they are sorted in * lexicographic order and that there are no repeats. 
* * Defaults to true */ public native @ByVal Attrs ValidateIndices(@Cast("bool") boolean x); public native @Cast("bool") boolean validate_indices_(); public native Attrs validate_indices_(boolean validate_indices_); } public SparseToDense(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input output_shape, @ByVal Input sparse_values, @ByVal Input default_value) { super((Pointer)null); allocate(scope, sparse_indices, output_shape, sparse_values, default_value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input output_shape, @ByVal Input sparse_values, @ByVal Input default_value); public SparseToDense(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input output_shape, @ByVal Input sparse_values, @ByVal Input default_value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, sparse_indices, output_shape, sparse_values, default_value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_indices, @ByVal Input output_shape, @ByVal Input sparse_values, @ByVal Input default_value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs ValidateIndices(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseToDense operation(Operation operation); public native @ByRef Output dense(); public native SparseToDense dense(Output dense); } /** Read {@code SparseTensors} from a {@code SparseTensorsMap} and concatenate them. * * The input {@code sparse_handles} must be an {@code int64} matrix of shape {@code [N, 1]} where * {@code N} is the minibatch size and the rows correspond to the output handles of * {@code AddSparseToTensorsMap} or {@code AddManySparseToTensorsMap}. The ranks of the * original {@code SparseTensor} objects that went into the given input ops must all * match. When the final {@code SparseTensor} is created, it has rank one * higher than the ranks of the incoming {@code SparseTensor} objects * (they have been concatenated along a new row dimension on the left). * * The output {@code SparseTensor} object's shape values for all dimensions but the * first are the max across the input {@code SparseTensor} objects' shape values * for the corresponding dimensions. Its first shape value is {@code N}, the minibatch * size. * * The input {@code SparseTensor} objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run {@code SparseReorder} to restore index ordering. * * For example, if the handles represent an input, which is a {@code [2, 3]} matrix * representing two original {@code SparseTensor} objects: * *
{@code
 *      index = [ 0]
 *              [10]
 *              [20]
 *      values = [1, 2, 3]
 *      shape = [50]
 *  }
* * and * *
{@code
 *      index = [ 2]
 *              [10]
 *      values = [4, 5]
 *      shape = [30]
 *  }
* * then the final {@code SparseTensor} will be: * *
{@code
 *      index = [0  0]
 *              [0 10]
 *              [0 20]
 *              [1  2]
 *              [1 10]
 *      values = [1, 2, 3, 4, 5]
 *      shape = [2 50]
 *  }
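 *
 *  A minimal construction sketch (illustrative; {@code sparse_handles} is assumed to come from a
 *  matching {@code AddManySparseToTensorsMap} op that used the same shared name, and the stored
 *  values are assumed to be {@code int64}):
 * <pre>{@code
 *  // "shared_sparse_map" is an assumed name; it must match the map that stored the handles
 *  TakeManySparseFromTensorsMap taken = new TakeManySparseFromTensorsMap(scope, sparse_handles,
 *      DT_INT64, TakeManySparseFromTensorsMap.SharedName("shared_sparse_map"));
 *  Output indices = taken.sparse_indices();
 *  }</pre>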
* * Arguments: * * scope: A Scope object * * sparse_handles: 1-D, The {@code N} serialized {@code SparseTensor} objects. * Shape: {@code [N]}. * * dtype: The {@code dtype} of the {@code SparseTensor} objects stored in the * {@code SparseTensorsMap}. * * Optional attributes (see {@code Attrs}): * * container: The container name for the {@code SparseTensorsMap} read by this op. * * shared_name: The shared name for the {@code SparseTensorsMap} read by this op. * It should not be blank; rather the {@code shared_name} or unique Operation name * of the Op that created the original {@code SparseTensorsMap} should be used. * * Returns: * * {@code Output} sparse_indices: 2-D. The {@code indices} of the minibatch {@code SparseTensor}. * * {@code Output} sparse_values: 1-D. The {@code values} of the minibatch {@code SparseTensor}. * * {@code Output} sparse_shape: 1-D. The {@code shape} of the minibatch {@code SparseTensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class TakeManySparseFromTensorsMap extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TakeManySparseFromTensorsMap(Pointer p) { super(p); } /** Optional attribute setters for TakeManySparseFromTensorsMap */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The container name for the {@code SparseTensorsMap} read by this op. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** The shared name for the {@code SparseTensorsMap} read by this op. * It should not be blank; rather the {@code shared_name} or unique Operation name * of the Op that created the original {@code SparseTensorsMap} should be used. 
* * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public TakeManySparseFromTensorsMap(@Const @ByRef Scope scope, @ByVal Input sparse_handles, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, sparse_handles, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_handles, @Cast("tensorflow::DataType") int dtype); public TakeManySparseFromTensorsMap(@Const @ByRef Scope scope, @ByVal Input sparse_handles, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, sparse_handles, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input sparse_handles, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native TakeManySparseFromTensorsMap operation(Operation operation); public native @ByRef Output sparse_indices(); public native TakeManySparseFromTensorsMap sparse_indices(Output sparse_indices); public native @ByRef Output sparse_values(); public native TakeManySparseFromTensorsMap sparse_values(Output sparse_values); public native @ByRef Output sparse_shape(); public native TakeManySparseFromTensorsMap sparse_shape(Output sparse_shape); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_SPARSE_OPS_H_ // Parsed from tensorflow/cc/ops/state_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_STATE_OPS_H_ // #define TENSORFLOW_CC_OPS_STATE_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup state_ops State Ops * \{

* Update 'ref' by assigning 'value' to it. * * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. May be uninitialized. * * value: The value to be assigned to the variable. * * Optional attributes (see {@code Attrs}): * * validate_shape: If true, the operation will validate that the shape * of 'value' matches the shape of the Tensor being assigned to. If false, * 'ref' will take on the shape of 'value'. * * use_locking: If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as "ref". Returned as a convenience for operations that want * to use the new value after the variable has been reset. */ @Namespace("tensorflow::ops") @NoOffset public static class Assign extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Assign(Pointer p) { super(p); } /** Optional attribute setters for Assign */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If true, the operation will validate that the shape * of 'value' matches the shape of the Tensor being assigned to. If false, * 'ref' will take on the shape of 'value'. * * Defaults to true */ /// public native @ByVal Attrs ValidateShape(@Cast("bool") boolean x); /** If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to true */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean validate_shape_(); public native Attrs validate_shape_(boolean validate_shape_); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public Assign(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value) { super((Pointer)null); allocate(scope, ref, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value); public Assign(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs ValidateShape(@Cast("bool") boolean x); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native Assign operation(Operation operation); public native @ByRef Output output_ref(); public native Assign output_ref(Output output_ref); } /** Update 'ref' by adding 'value' to it. 
* * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * value: The value to be added to the variable. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as "ref". Returned as a convenience for operations that want * to use the new value after the variable has been updated. */ @Namespace("tensorflow::ops") @NoOffset public static class AssignAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AssignAdd(Pointer p) { super(p); } /** Optional attribute setters for AssignAdd */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public AssignAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value) { super((Pointer)null); allocate(scope, ref, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value); public AssignAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native AssignAdd operation(Operation operation); public native @ByRef Output output_ref(); public native AssignAdd output_ref(Output output_ref); } /** Update 'ref' by subtracting 'value' from it. * * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * value: The value to be subtracted to the variable. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as "ref". Returned as a convenience for operations that want * to use the new value after the variable has been updated. 
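 *
 *  A minimal construction sketch of the pattern shared by {@code Assign}, {@code AssignAdd} and
 *  this op (illustrative; {@code scope}, {@code ref} and {@code value} are assumed to exist):
 * <pre>{@code
 *  AssignSub sub = new AssignSub(scope, ref, value, AssignSub.UseLocking(true));
 *  Output newRef = sub.output_ref();  // chain further ops off the updated ref
 *  }</pre>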
*/ @Namespace("tensorflow::ops") @NoOffset public static class AssignSub extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AssignSub(Pointer p) { super(p); } /** Optional attribute setters for AssignSub */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public AssignSub(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value) { super((Pointer)null); allocate(scope, ref, value); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value); public AssignSub(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, value, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input value, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native AssignSub operation(Operation operation); public native @ByRef Output output_ref(); public native AssignSub output_ref(Output output_ref); } /** Increments 'ref' until it reaches 'limit'. * * Arguments: * * scope: A Scope object * * ref: Should be from a scalar {@code Variable} node. * * limit: If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. * * Returns: * * {@code Output}: A copy of the input before increment. If nothing else modifies the * input, the values produced will all be distinct. */ @Namespace("tensorflow::ops") @NoOffset public static class CountUpTo extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CountUpTo(Pointer p) { super(p); } public CountUpTo(@Const @ByRef Scope scope, @ByVal Input ref, @Cast("tensorflow::int64") long limit) { super((Pointer)null); allocate(scope, ref, limit); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @Cast("tensorflow::int64") long limit); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native CountUpTo operation(Operation operation); public native @ByRef Output output(); public native CountUpTo output(Output output); } /** Destroys the temporary variable and returns its final value. 
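 *
 *  A minimal construction sketch (illustrative; {@code ref} is assumed to come from a matching
 *  {@code TemporaryVariable} op named "tmp_var"):
 * <pre>{@code
 *  DestroyTemporaryVariable done = new DestroyTemporaryVariable(scope, ref, "tmp_var");
 *  Output finalValue = done.value();
 *  }</pre>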
* * Sets output to the value of the Tensor pointed to by 'ref', then destroys * the temporary variable called 'var_name'. * All other uses of 'ref' *must* have executed before this op. * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. * * Outputs the final value of the tensor pointed to by 'ref'. * * Arguments: * * scope: A Scope object * * ref: A reference to the temporary variable tensor. * * var_name: Name of the temporary variable, usually the name of the matching * 'TemporaryVariable' op. * * Returns: * * {@code Output}: The value tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class DestroyTemporaryVariable extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DestroyTemporaryVariable(Pointer p) { super(p); } public DestroyTemporaryVariable(@Const @ByRef Scope scope, @ByVal Input ref, @StringPiece BytePointer var_name) { super((Pointer)null); allocate(scope, ref, var_name); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @StringPiece BytePointer var_name); public DestroyTemporaryVariable(@Const @ByRef Scope scope, @ByVal Input ref, @StringPiece String var_name) { super((Pointer)null); allocate(scope, ref, var_name); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @StringPiece String var_name); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DestroyTemporaryVariable operation(Operation operation); public native @ByRef Output value(); public native DestroyTemporaryVariable value(Output value); } /** Checks whether a tensor has been initialized. * * Outputs boolean scalar indicating whether the tensor has been initialized. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. May be uninitialized. * * Returns: * * {@code Output}: The is_initialized tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class IsVariableInitialized extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IsVariableInitialized(Pointer p) { super(p); } public IsVariableInitialized(@Const @ByRef Scope scope, @ByVal Input ref) { super((Pointer)null); allocate(scope, ref); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native IsVariableInitialized operation(Operation operation); public native @ByRef Output is_initialized(); public native IsVariableInitialized is_initialized(Output is_initialized); } /** Increments variable pointed to by 'resource' until it reaches 'limit'. * * Arguments: * * scope: A Scope object * * resource: Should be from a scalar {@code Variable} node. * * limit: If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. * * Returns: * * {@code Output}: A copy of the input before increment. If nothing else modifies the * input, the values produced will all be distinct. 
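 *
 *  A minimal construction sketch (illustrative; {@code resource} is assumed to come from a
 *  {@code VarHandleOp} holding a scalar {@code int32} variable):
 * <pre>{@code
 *  ResourceCountUpTo counter = new ResourceCountUpTo(scope, resource, 100, DT_INT32);
 *  Output previous = counter.output();  // the value before the increment
 *  }</pre>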
*/ @Namespace("tensorflow::ops") @NoOffset public static class ResourceCountUpTo extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceCountUpTo(Pointer p) { super(p); } public ResourceCountUpTo(@Const @ByRef Scope scope, @ByVal Input resource, @Cast("tensorflow::int64") long limit, @Cast("tensorflow::DataType") int T) { super((Pointer)null); allocate(scope, resource, limit, T); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input resource, @Cast("tensorflow::int64") long limit, @Cast("tensorflow::DataType") int T); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native ResourceCountUpTo operation(Operation operation); public native @ByRef Output output(); public native ResourceCountUpTo output(Output output); } /** Adds sparse {@code updates} to individual values or slices within a given * * variable according to {@code indices}. * * {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. * * {@code indices} must be integer tensor, containing indices into {@code ref}. * It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. * * The innermost dimension of {@code indices} (with length {@code K}) corresponds to * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th * dimension of {@code ref}. * * {@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: * *

{@code
 *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
 *  }
* * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that update would look like this: * *
{@code python
 *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
 *      indices = tf.constant([[4], [3], [1], [7]])
 *      updates = tf.constant([9, 10, 11, 12])
 *      update = tf.scatter_nd_add(ref, indices, updates)
 *      with tf.Session() as sess:
 *        print(sess.run(update))
 *  }
* * The resulting update to ref would look like this: * * [1, 13, 3, 14, 14, 6, 7, 20] * * See {@code tf.scatter_nd} for more details about how to make updates to * slices. * * Arguments: * * scope: A Scope object * * ref: A resource handle. Must be from a VarHandleOp. * * indices: A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * * updates: A Tensor. Must have the same type as ref. A tensor of * values to add to ref. * * Optional attributes (see {@code Attrs}): * * use_locking: An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceScatterNdAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceScatterNdAdd(Pointer p) { super(p); } /** Optional attribute setters for ResourceScatterNdAdd */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Defaults to true */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceScatterNdAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ResourceScatterNdAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceScatterNdAdd operation(Operation operation); } /** Applies sparse {@code updates} to individual values or slices within a given * * variable according to {@code indices}. * * {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. * * {@code indices} must be integer tensor, containing indices into {@code ref}. * It must be shape {@code [d_0, ..., d_{Q-2}, K]} where {@code 0 < K <= P}. * * The innermost dimension of {@code indices} (with length {@code K}) corresponds to * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th * dimension of {@code ref}. 
 * 
 *  {@code updates} is a {@code Tensor} of rank {@code Q-1+P-K} with shape:
 * 
{@code
 *  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
 *  }
 * 
 *  For example, say we want to update 4 scattered elements of a rank-1 tensor
 *  with 8 elements. In Python, that update would look like this:
 * 
{@code python
 *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
 *      indices = tf.constant([[4], [3], [1], [7]])
 *      updates = tf.constant([9, 10, 11, 12])
 *      update = tf.scatter_nd_update(ref, indices, updates)
 *      with tf.Session() as sess:
 *        print sess.run(update)
 *  }
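 * 
 *  A corresponding Java sketch using this class (illustrative only;
 *  {@code scope}, {@code ref}, {@code indices} and {@code updates} are assumed
 *  to exist, and the attribute shown just restates the default
 *  {@code use_locking = true}):
 * 
 *  {@code java
 *      ResourceScatterNdUpdate update = new ResourceScatterNdUpdate(scope, ref,
 *              indices, updates, new ResourceScatterNdUpdate.Attrs().UseLocking(true));
 *      Operation op = update.asOperation();
 *  }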
* * The resulting update to ref would look like this: * * [1, 11, 3, 10, 9, 6, 7, 12] * * See {@code tf.scatter_nd} for more details about how to make updates to * slices. * * Arguments: * * scope: A Scope object * * ref: A resource handle. Must be from a VarHandleOp. * * indices: A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * * updates: A Tensor. Must have the same type as ref. A tensor of updated * values to add to ref. * * Optional attributes (see {@code Attrs}): * * use_locking: An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceScatterNdUpdate extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceScatterNdUpdate(Pointer p) { super(p); } /** Optional attribute setters for ResourceScatterNdUpdate */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Defaults to true */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceScatterNdUpdate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ResourceScatterNdUpdate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceScatterNdUpdate operation(Operation operation); } /** Adds sparse updates to a variable reference. * * This operation computes * * # Scalar indices * ref[indices, ...] += updates[...] * * # Vector indices (for each i) * ref[indices[i], ...] += updates[i, ...] * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] * * This operation outputs {@code ref} after the update is done. * This makes it easier to chain operations that need to use the reset value. * * Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions add. 
 * 
 *  Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
 * 
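 *  As a hypothetical Java sketch against this class (the {@code Scope} and the
 *  three inputs are assumed to be built elsewhere):
 * 
 *  {@code java
 *      ScatterAdd add = new ScatterAdd(scope, ref, indices, updates);
 *      Output updated = add.output_ref();  // aliases ref once the addition has run
 *  }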
* * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of updated values to add to {@code ref}. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterAdd extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterAdd(Pointer p) { super(p); } /** Optional attribute setters for ScatterAdd */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterAdd operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterAdd output_ref(Output output_ref); } /** Divides a variable reference by sparse updates. * * This operation computes * *
{@code python
 *      # Scalar indices
 *      ref[indices, ...] /= updates[...]
 * 
 *      # Vector indices (for each i)
 *      ref[indices[i], ...] /= updates[i, ...]
 * 
 *      # High rank indices (for each i, ..., j)
 *      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
 *  }
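 * 
 *  A minimal Java sketch of building the same node (inputs assumed to exist;
 *  the static {@code Attrs} helper used here is declared on the class below):
 * 
 *  {@code java
 *      ScatterDiv div = new ScatterDiv(scope, ref, indices, updates,
 *              ScatterDiv.UseLocking(true));
 *      Output result = div.asOutput();
 *  }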
* * This operation outputs {@code ref} after the update is done. * This makes it easier to chain operations that need to use the reset value. * * Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions divide. * * Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of values that {@code ref} is divided by. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterDiv extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterDiv(Pointer p) { super(p); } /** Optional attribute setters for ScatterDiv */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterDiv(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterDiv(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterDiv operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterDiv output_ref(Output output_ref); } /** Reduces sparse updates into a variable reference using the {@code max} operation. * * This operation computes * * # Scalar indices * ref[indices, ...] = max(ref[indices, ...], updates[...]) * * # Vector indices (for each i) * ref[indices[i], ...] 
= max(ref[indices[i], ...], updates[i, ...]) * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) * * This operation outputs {@code ref} after the update is done. * This makes it easier to chain operations that need to use the reset value. * * Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions combine. * * Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * *
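 *  A possible Java counterpart via this binding; a sketch only, with the
 *  inputs assumed from elsewhere and locking left at its default (false):
 * 
 *  {@code java
 *      ScatterMax max = new ScatterMax(scope, ref, indices, updates);
 *      Output updated = max.output_ref();
 *  }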
* * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of updated values to reduce into {@code ref}. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterMax extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterMax(Pointer p) { super(p); } /** Optional attribute setters for ScatterMax */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterMax(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterMax(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterMax operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterMax output_ref(Output output_ref); } /** Reduces sparse updates into a variable reference using the {@code min} operation. * * This operation computes * * # Scalar indices * ref[indices, ...] = min(ref[indices, ...], updates[...]) * * # Vector indices (for each i) * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) * * This operation outputs {@code ref} after the update is done. * This makes it easier to chain operations that need to use the reset value. 
 * 
 *  Duplicate entries are handled correctly: if multiple {@code indices} reference
 *  the same location, their contributions combine.
 * 
 *  Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
 * 
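 *  Analogous to ScatterMax above, a hypothetical Java sketch with explicit
 *  locking requested:
 * 
 *  {@code java
 *      ScatterMin min = new ScatterMin(scope, ref, indices, updates,
 *              new ScatterMin.Attrs().UseLocking(true));
 *      Output updated = min.output_ref();
 *  }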
* * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of updated values to reduce into {@code ref}. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterMin extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterMin(Pointer p) { super(p); } /** Optional attribute setters for ScatterMin */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterMin(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterMin(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterMin operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterMin output_ref(Output output_ref); } /** Multiplies sparse updates into a variable reference. * * This operation computes * *
{@code python
 *      # Scalar indices
 *      ref[indices, ...] *= updates[...]
 * 
 *      # Vector indices (for each i)
 *      ref[indices[i], ...] *= updates[i, ...]
 * 
 *      # High rank indices (for each i, ..., j)
 *      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
 *  }
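 * 
 *  From Java, assuming the inputs were built elsewhere in the same
 *  {@code Scope}, a minimal sketch:
 * 
 *  {@code java
 *      ScatterMul mul = new ScatterMul(scope, ref, indices, updates);
 *      Output updated = mul.asOutput();  // chainable into further ops
 *  }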
* * This operation outputs {@code ref} after the update is done. * This makes it easier to chain operations that need to use the reset value. * * Duplicate entries are handled correctly: if multiple {@code indices} reference * the same location, their contributions multiply. * * Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of updated values to multiply to {@code ref}. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterMul extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterMul(Pointer p) { super(p); } /** Optional attribute setters for ScatterMul */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterMul(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterMul(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterMul operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterMul output_ref(Output output_ref); } /** Applies sparse addition between {@code updates} and individual values or slices * * within a given variable according to {@code indices}. * * {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. 
 * 
 *  {@code indices} must be an integer tensor, containing indices into {@code ref}.
 *  It must be shape {@code \\([d_0, ..., d_{Q-2}, K]\\)} where {@code 0 < K <= P}.
 * 
 *  The innermost dimension of {@code indices} (with length {@code K}) corresponds to
 *  indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th
 *  dimension of {@code ref}.
 * 
 *  {@code updates} is a {@code Tensor} of rank {@code Q-1+P-K} with shape:
 * 
 *  $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
 * 
 *  For example, say we want to add 4 scattered elements to a rank-1 tensor
 *  with 8 elements. In Python, that addition would look like this:
 * 
 *  {@code python
 *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
 *      indices = tf.constant([[4], [3], [1], [7]])
 *      updates = tf.constant([9, 10, 11, 12])
 *      add = tf.scatter_nd_add(ref, indices, updates)
 *      with tf.Session() as sess:
 *        print sess.run(add)
 *  }
 * 
 *  The resulting update to ref would look like this:
 * 
 *      [1, 13, 3, 14, 14, 6, 7, 20]
 * 
 *  See {@code tf.scatter_nd} for more details about how to make updates to
 *  slices.
 * 
 *  Arguments:
 *  * scope: A Scope object
 *  * ref: A mutable Tensor. Should be from a Variable node.
 *  * indices: A Tensor. Must be one of the following types: int32, int64.
 *  A tensor of indices into ref.
 *  * updates: A Tensor. Must have the same type as ref. A tensor of updated values
 *  to add to ref.
 * 
 *  Optional attributes (see {@code Attrs}):
 *  * use_locking: An optional bool. Defaults to False. If True, the assignment will
 *  be protected by a lock; otherwise the behavior is undefined,
 *  but may exhibit less contention.
 * 
 *  Returns:
 *  * {@code Output}: Same as ref. Returned as a convenience for operations that want
 *  to use the updated values after the update is done. */
@Namespace("tensorflow::ops") @NoOffset public static class ScatterNdAdd extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public ScatterNdAdd(Pointer p) { super(p); }

    /** Optional attribute setters for ScatterNdAdd */
    public static class Attrs extends Pointer {
        static { Loader.load(); }
        /** Default native constructor. */
        public Attrs() { super((Pointer)null); allocate(); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public Attrs(long size) { super((Pointer)null); allocateArray(size); }
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public Attrs(Pointer p) { super(p); }
        private native void allocate();
        private native void allocateArray(long size);
        @Override public Attrs position(long position) { return (Attrs)super.position(position); }

        /** An optional bool. Defaults to False. If True, the assignment will
         *  be protected by a lock; otherwise the behavior is undefined,
         *  but may exhibit less contention.
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterNdAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterNdAdd(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterNdAdd operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterNdAdd output_ref(Output output_ref); } /** Applies sparse subtraction between {@code updates} and individual values or slices * * within a given variable according to {@code indices}. * * {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. * * {@code indices} must be integer tensor, containing indices into {@code ref}. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where {@code 0 < K <= P}. * * The innermost dimension of {@code indices} (with length {@code K}) corresponds to * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th * dimension of {@code ref}. * * {@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: * * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. In Python, that subtraction would look like this: * * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) * indices = tf.constant([[4], [3], [1], [7]]) * updates = tf.constant([9, 10, 11, 12]) * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) * * The resulting update to ref would look like this: * * [1, -9, 3, -6, -4, 6, 7, -4] * * See {@code tf.scatter_nd} for more details about how to make updates to * slices. * * Arguments: * * scope: A Scope object * * ref: A mutable Tensor. Should be from a Variable node. * * indices: A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * * updates: A Tensor. Must have the same type as ref. A tensor of updated values * to subtract from ref. * * Optional attributes (see {@code Attrs}): * * use_locking: An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Returns: * * {@code Output}: Same as ref. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterNdSub extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ScatterNdSub(Pointer p) { super(p); } /** Optional attribute setters for ScatterNdSub */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterNdSub(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterNdSub(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterNdSub operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterNdSub output_ref(Output output_ref); } /** Applies sparse {@code updates} to individual values or slices within a given * * variable according to {@code indices}. * * {@code ref} is a {@code Tensor} with rank {@code P} and {@code indices} is a {@code Tensor} of rank {@code Q}. * * {@code indices} must be integer tensor, containing indices into {@code ref}. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where {@code 0 < K <= P}. * * The innermost dimension of {@code indices} (with length {@code K}) corresponds to * indices into elements (if {@code K = P}) or slices (if {@code K < P}) along the {@code K}th * dimension of {@code ref}. * * {@code updates} is {@code Tensor} of rank {@code Q-1+P-K} with shape: * * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that update would look like this: * *
{@code python
 *      ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
 *      indices = tf.constant([[4], [3], [1], [7]])
 *      updates = tf.constant([9, 10, 11, 12])
 *      update = tf.scatter_nd_update(ref, indices, updates)
 *      with tf.Session() as sess:
 *        print sess.run(update)
 *  }
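 * 
 *  A speculative Java equivalent via this class (every identifier other than
 *  the class and its declared members is an assumption):
 * 
 *  {@code java
 *      ScatterNdUpdate update = new ScatterNdUpdate(scope, ref, indices, updates);
 *      Output updated = update.output_ref();
 *  }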
* * The resulting update to ref would look like this: * * [1, 11, 3, 10, 9, 6, 7, 12] * * See {@code tf.scatter_nd} for more details about how to make updates to * slices. * * See also {@code tf.scatter_update} and {@code tf.batch_scatter_update}. * * Arguments: * * scope: A Scope object * * ref: A mutable Tensor. Should be from a Variable node. * * indices: A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * * updates: A Tensor. Must have the same type as ref. A tensor of updated * values to add to ref. * * Optional attributes (see {@code Attrs}): * * use_locking: An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Returns: * * {@code Output}: Same as ref. Returned as a convenience for operations that want to * use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterNdUpdate extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterNdUpdate(Pointer p) { super(p); } /** Optional attribute setters for ScatterNdUpdate */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. * * Defaults to true */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterNdUpdate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterNdUpdate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterNdUpdate operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterNdUpdate output_ref(Output output_ref); } /** Subtracts sparse updates to a variable reference. * *
{@code python
 *      # Scalar indices
 *      ref[indices, ...] -= updates[...]
 * 
 *      # Vector indices (for each i)
 *      ref[indices[i], ...] -= updates[i, ...]
 * 
 *      # High rank indices (for each i, ..., j)
 *      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
 *  }
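 * 
 *  A minimal Java sketch of issuing this op, assuming {@code scope} and the
 *  three inputs exist:
 * 
 *  {@code java
 *      ScatterSub sub = new ScatterSub(scope, ref, indices, updates,
 *              ScatterSub.UseLocking(true));
 *      Output updated = sub.output_ref();
 *  }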
 * 
 *  This operation outputs {@code ref} after the update is done.
 *  This makes it easier to chain operations that need to use the reset value.
 * 
 *  Duplicate entries are handled correctly: if multiple {@code indices} reference
 *  the same location, their (negated) contributions add.
 * 
 *  Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
 * 
* * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of updated values to subtract from {@code ref}. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterSub extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterSub(Pointer p) { super(p); } /** Optional attribute setters for ScatterSub */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterSub(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterSub(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterSub operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterSub output_ref(Output output_ref); } /** Applies sparse updates to a variable reference. * * This operation computes * *
{@code python
 *      # Scalar indices
 *      ref[indices, ...] = updates[...]
 * 
 *      # Vector indices (for each i)
 *      ref[indices[i], ...] = updates[i, ...]
 * 
 *      # High rank indices (for each i, ..., j)
 *      ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
 *  }
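 * 
 *  In Java, a comparable node could be created like this (a sketch; the inputs
 *  are assumed, and {@code use_locking} already defaults to true for this op):
 * 
 *  {@code java
 *      ScatterUpdate update = new ScatterUpdate(scope, ref, indices, updates);
 *      Output updated = update.output_ref();
 *  }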
 * 
 *  This operation outputs {@code ref} after the update is done.
 *  This makes it easier to chain operations that need to use the reset value.
 * 
 *  If values in {@code ref} are to be updated more than once, because there are
 *  duplicate entries in {@code indices}, the order in which the updates happen
 *  for each value is undefined.
 * 
 *  Requires {@code updates.shape = indices.shape + ref.shape[1:]} or {@code updates.shape = []}.
 * 
* * See also {@code tf.batch_scatter_update} and {@code tf.scatter_nd_update}. * * Arguments: * * scope: A Scope object * * ref: Should be from a {@code Variable} node. * * indices: A tensor of indices into the first dimension of {@code ref}. * * updates: A tensor of updated values to store in {@code ref}. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: = Same as {@code ref}. Returned as a convenience for operations that want * to use the updated values after the update is done. */ @Namespace("tensorflow::ops") @NoOffset public static class ScatterUpdate extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ScatterUpdate(Pointer p) { super(p); } /** Optional attribute setters for ScatterUpdate */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to true */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ScatterUpdate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates) { super((Pointer)null); allocate(scope, ref, indices, updates); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates); public ScatterUpdate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, ref, indices, updates, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input ref, @ByVal Input indices, @ByVal Input updates, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ScatterUpdate operation(Operation operation); public native @ByRef Output output_ref(); public native ScatterUpdate output_ref(Output output_ref); } /** Returns a tensor that may be mutated, but only persists within a single step. * * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. * * Outputs a ref to the tensor state so it may be read or modified. * * E.g. 
* var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name * var = state_ops.assign(var, [[4.0, 5.0]]) * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) * * Arguments: * * scope: A Scope object * * shape: The shape of the variable tensor. * * dtype: The type of elements in the variable tensor. * * Optional attributes (see {@code Attrs}): * * var_name: Overrides the name used for the temporary variable resource. Default * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). * * Returns: * * {@code Output}: A reference to the variable tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class TemporaryVariable extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TemporaryVariable(Pointer p) { super(p); } /** Optional attribute setters for TemporaryVariable */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Overrides the name used for the temporary variable resource. Default * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). * * Defaults to "" */ public native @ByVal Attrs VarName(@StringPiece BytePointer x); public native @ByVal Attrs VarName(@StringPiece String x); public native @StringPiece BytePointer var_name_(); public native Attrs var_name_(BytePointer var_name_); } public TemporaryVariable(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, shape, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype); public TemporaryVariable(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs VarName(@StringPiece BytePointer x); public static native @ByVal Attrs VarName(@StringPiece String x); public native @ByRef Operation operation(); public native TemporaryVariable operation(Operation operation); public native @ByRef Output ref(); public native TemporaryVariable ref(Output ref); } /** Holds state in the form of a tensor that persists across steps. * * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. * * Arguments: * * scope: A Scope object * * shape: The shape of the variable tensor. * * dtype: The type of elements in the variable tensor. 
* * Optional attributes (see {@code Attrs}): * * container: If non-empty, this variable is placed in the given container. * Otherwise, a default container is used. * * shared_name: If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Returns: * * {@code Output}: A reference to the variable tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Variable extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Variable(Pointer p) { super(p); } /** Optional attribute setters for Variable */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If non-empty, this variable is placed in the given container. * Otherwise, a default container is used. * * Defaults to "" */ /// public native @ByVal Attrs Container(@StringPiece BytePointer x); public native @ByVal Attrs Container(@StringPiece String x); /** If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. * * Defaults to "" */ public native @ByVal Attrs SharedName(@StringPiece BytePointer x); public native @ByVal Attrs SharedName(@StringPiece String x); public native @StringPiece BytePointer container_(); public native Attrs container_(BytePointer container_); public native @StringPiece BytePointer shared_name_(); public native Attrs shared_name_(BytePointer shared_name_); } public Variable(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype) { super((Pointer)null); allocate(scope, shape, dtype); } private native void allocate(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype); public Variable(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, shape, dtype, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal PartialTensorShape shape, @Cast("tensorflow::DataType") int dtype, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Container(@StringPiece BytePointer x); public static native @ByVal Attrs Container(@StringPiece String x); public static native @ByVal Attrs SharedName(@StringPiece BytePointer x); public static native @ByVal Attrs SharedName(@StringPiece String x); public native @ByRef Operation operation(); public native Variable operation(Operation operation); public native @ByRef Output ref(); public native Variable ref(Output ref); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_STATE_OPS_H_ // Parsed from tensorflow/cc/ops/string_ops.h // This file is MACHINE GENERATED! Do not edit. 
// #ifndef TENSORFLOW_CC_OPS_STRING_OPS_H_ // #define TENSORFLOW_CC_OPS_STRING_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup string_ops String Ops * \{

* Converts each entry in the given tensor to strings. Supports many numeric * * types and boolean. * * Arguments: * * scope: A Scope object * * Optional attributes (see {@code Attrs}): * * precision: The post-decimal precision to use for floating point numbers. * Only used if precision > -1. * * scientific: Use scientific notation for floating point numbers. * * shortest: Use shortest representation (either scientific or standard) for * floating point numbers. * * width: Pad pre-decimal numbers to this width. * Applies to both floating point and integer numbers. * Only used if width > -1. * * fill: The value to pad if width > -1. If empty, pads with spaces. * Another typical value is '0'. String cannot be longer than 1 character. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class AsString extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AsString(Pointer p) { super(p); } /** Optional attribute setters for AsString */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The post-decimal precision to use for floating point numbers. * Only used if precision > -1. * * Defaults to -1 */ /// public native @ByVal Attrs Precision(@Cast("tensorflow::int64") long x); /** Use scientific notation for floating point numbers. * * Defaults to false */ /// public native @ByVal Attrs Scientific(@Cast("bool") boolean x); /** Use shortest representation (either scientific or standard) for * floating point numbers. * * Defaults to false */ /// public native @ByVal Attrs Shortest(@Cast("bool") boolean x); /** Pad pre-decimal numbers to this width. * Applies to both floating point and integer numbers. * Only used if width > -1. * * Defaults to -1 */ /// public native @ByVal Attrs Width(@Cast("tensorflow::int64") long x); /** The value to pad if width > -1. If empty, pads with spaces. * Another typical value is '0'. String cannot be longer than 1 character. 
* * Defaults to "" */ public native @ByVal Attrs Fill(@StringPiece BytePointer x); public native @ByVal Attrs Fill(@StringPiece String x); public native @Cast("tensorflow::int64") long precision_(); public native Attrs precision_(long precision_); public native @Cast("bool") boolean scientific_(); public native Attrs scientific_(boolean scientific_); public native @Cast("bool") boolean shortest_(); public native Attrs shortest_(boolean shortest_); public native @Cast("tensorflow::int64") long width_(); public native Attrs width_(long width_); public native @StringPiece BytePointer fill_(); public native Attrs fill_(BytePointer fill_); } public AsString(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public AsString(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Precision(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Scientific(@Cast("bool") boolean x); public static native @ByVal Attrs Shortest(@Cast("bool") boolean x); public static native @ByVal Attrs Width(@Cast("tensorflow::int64") long x); public static native @ByVal Attrs Fill(@StringPiece BytePointer x); public static native @ByVal Attrs Fill(@StringPiece String x); public native @ByRef Operation operation(); public native AsString operation(Operation operation); public native @ByRef Output output(); public native AsString output(Output output); } /** Decode web-safe base64-encoded strings. * * Input may or may not have padding at the end. See EncodeBase64 for padding. * Web-safe means that input must use - and _ instead of + and /. * * Arguments: * * scope: A Scope object * * input: Base64 strings to decode. * * Returns: * * {@code Output}: Decoded strings. */ @Namespace("tensorflow::ops") @NoOffset public static class DecodeBase64 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DecodeBase64(Pointer p) { super(p); } public DecodeBase64(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native DecodeBase64 operation(Operation operation); public native @ByRef Output output(); public native DecodeBase64 output(Output output); } /** Encode strings into web-safe base64 format. * * Refer to the following article for more information on base64 format: * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. * * Web-safe means that the encoder uses - and _ instead of + and /. * * Arguments: * * scope: A Scope object * * input: Strings to be encoded. * * Optional attributes (see {@code Attrs}): * * pad: Bool whether padding is applied at the ends. 
* * Returns: * * {@code Output}: Input strings encoded in base64. */ @Namespace("tensorflow::ops") @NoOffset public static class EncodeBase64 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EncodeBase64(Pointer p) { super(p); } /** Optional attribute setters for EncodeBase64 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** Bool whether padding is applied at the ends. * * Defaults to false */ public native @ByVal Attrs Pad(@Cast("bool") boolean x); public native @Cast("bool") boolean pad_(); public native Attrs pad_(boolean pad_); } public EncodeBase64(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public EncodeBase64(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Pad(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native EncodeBase64 operation(Operation operation); public native @ByRef Output output(); public native EncodeBase64 output(Output output); } /** Joins a string Tensor across the given dimensions. * * Computes the string join across dimensions in the given string Tensor of shape * {@code [\\(d_0, d_1, ..., d_{n-1}\\)]}. Returns a new Tensor created by joining the input * strings with the given separator (default: empty string). Negative indices are * counted backwards from the end, with {@code -1} being equivalent to {@code n - 1}. If * indices are not specified, joins across all dimensions beginning from {@code n - 1} * through {@code 0}. * * For example: * *

{@code python
 *  # tensor `a` is [["a", "b"], ["c", "d"]]
 *  tf.reduce_join(a, 0) ==> ["ac", "bd"]
 *  tf.reduce_join(a, 1) ==> ["ab", "cd"]
 *  tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
 *  tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
 *  tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
 *  tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
 *  tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
 *  tf.reduce_join(a, [0, 1]) ==> "acbd"
 *  tf.reduce_join(a, [1, 0]) ==> "abcd"
 *  tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
 *  tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
 *  }
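 * 
 *  The same reduction can be expressed through these generated bindings. The
 *  following is an illustrative sketch only, not generated code; it assumes a
 *  valid {@code Scope} named {@code scope} and {@code Input} handles
 *  {@code a} and {@code reductionIndices} built elsewhere:
 * 
 *  {@code java
 *  // Join along dimension 0, keep the reduced dimension, and use "." as glue.
 *  ReduceJoin.Attrs attrs = new ReduceJoin.Attrs().KeepDims(true).Separator(".");
 *  ReduceJoin join = new ReduceJoin(scope, a, reductionIndices, attrs);
 *  Output joined = join.output();
 *  }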
* * Arguments: * * scope: A Scope object * * inputs: The input to be joined. All reduced indices must have non-zero size. * * reduction_indices: The dimensions to reduce over. Dimensions are reduced in the * order specified. Omitting {@code reduction_indices} is equivalent to passing * {@code [n-1, n-2, ..., 0]}. Negative indices from {@code -n} to {@code -1} are supported. * * Optional attributes (see {@code Attrs}): * * keep_dims: If {@code True}, retain reduced dimensions with length {@code 1}. * * separator: The separator to use when joining. * * Returns: * * {@code Output}: Has shape equal to that of the input with reduced dimensions removed or * set to {@code 1} depending on {@code keep_dims}. */ @Namespace("tensorflow::ops") @NoOffset public static class ReduceJoin extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReduceJoin(Pointer p) { super(p); } /** Optional attribute setters for ReduceJoin */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, retain reduced dimensions with length {@code 1}. * * Defaults to false */ /// public native @ByVal Attrs KeepDims(@Cast("bool") boolean x); /** The separator to use when joining. * * Defaults to "" */ public native @ByVal Attrs Separator(@StringPiece BytePointer x); public native @ByVal Attrs Separator(@StringPiece String x); public native @Cast("bool") boolean keep_dims_(); public native Attrs keep_dims_(boolean keep_dims_); public native @StringPiece BytePointer separator_(); public native Attrs separator_(BytePointer separator_); } public ReduceJoin(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input reduction_indices) { super((Pointer)null); allocate(scope, inputs, reduction_indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input reduction_indices); public ReduceJoin(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input reduction_indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, inputs, reduction_indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input inputs, @ByVal Input reduction_indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs KeepDims(@Cast("bool") boolean x); public static native @ByVal Attrs Separator(@StringPiece BytePointer x); public static native @ByVal Attrs Separator(@StringPiece String x); public native @ByRef Operation operation(); public native ReduceJoin operation(Operation operation); public native @ByRef Output output(); public native ReduceJoin output(Output output); } /** Check if the input matches the regex pattern. * * The input is a string tensor of any shape. The pattern is a scalar * string tensor which is applied to every element of the input tensor. 
* The boolean values (True or False) of the output tensor indicate * if the input matches the regex pattern provided. * * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) * * Arguments: * * scope: A Scope object * * input: A string tensor of the text to be processed. * * pattern: A scalar string tensor containing the regular expression to match the input. * * Returns: * * {@code Output}: A bool tensor with the same shape as {@code input}. */ @Namespace("tensorflow::ops") @NoOffset public static class RegexFullMatch extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RegexFullMatch(Pointer p) { super(p); } public RegexFullMatch(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pattern) { super((Pointer)null); allocate(scope, input, pattern); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pattern); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native RegexFullMatch operation(Operation operation); public native @ByRef Output output(); public native RegexFullMatch output(Output output); } /** Replaces the match of pattern in input with rewrite. * * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) * * Arguments: * * scope: A Scope object * * input: The text to be processed. * * pattern: The regular expression to match the input. * * rewrite: The rewrite to be applied to the matched expression. * * Optional attributes (see {@code Attrs}): * * replace_global: If True, the replacement is global, otherwise the replacement * is done only on the first match. * * Returns: * * {@code Output}: The text after applying pattern and rewrite. */ @Namespace("tensorflow::ops") @NoOffset public static class RegexReplace extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RegexReplace(Pointer p) { super(p); } /** Optional attribute setters for RegexReplace */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the replacement is global, otherwise the replacement * is done only on the first match. 
* * Defaults to true */ public native @ByVal Attrs ReplaceGlobal(@Cast("bool") boolean x); public native @Cast("bool") boolean replace_global_(); public native Attrs replace_global_(boolean replace_global_); } public RegexReplace(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pattern, @ByVal Input rewrite) { super((Pointer)null); allocate(scope, input, pattern, rewrite); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pattern, @ByVal Input rewrite); public RegexReplace(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pattern, @ByVal Input rewrite, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, pattern, rewrite, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pattern, @ByVal Input rewrite, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs ReplaceGlobal(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native RegexReplace operation(Operation operation); public native @ByRef Output output(); public native RegexReplace output(Output output); } /** Formats a string template using a list of tensors. * * Formats a string template using a list of tensors, pretty-printing tensor summaries. * * Arguments: * * scope: A Scope object * * inputs: The list of tensors to format into the placeholder string. * * Optional attributes (see {@code Attrs}): * * template_: A string, the template to format tensor summaries into. * * placeholder: A string, at each placeholder in the template a subsequent tensor summary will be inserted. * * summarize: When formatting the tensor summaries, print the first and last {@code summarize} entries of each tensor dimension. * * Returns: * * {@code Output}: The resulting string scalar. */ @Namespace("tensorflow::ops") @NoOffset public static class StringFormat extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringFormat(Pointer p) { super(p); } /** Optional attribute setters for StringFormat */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A string, the template to format tensor summaries into. * * Defaults to "%s" */ /// public native @ByVal Attrs Template(@StringPiece BytePointer x); public native @ByVal Attrs Template(@StringPiece String x); /** A string, at each placeholder in the template a subsequent tensor summary will be inserted. * * Defaults to "%s" */ /// public native @ByVal Attrs Placeholder(@StringPiece BytePointer x); public native @ByVal Attrs Placeholder(@StringPiece String x); /** When formatting the tensor summaries, print the first and last {@code summarize} entries of each tensor dimension. 
* * Defaults to 3 */ public native @ByVal Attrs Summarize(@Cast("tensorflow::int64") long x); public native @StringPiece BytePointer template_(); public native Attrs template_(BytePointer template_); public native @StringPiece BytePointer placeholder_(); public native Attrs placeholder_(BytePointer placeholder_); public native @Cast("tensorflow::int64") long summarize_(); public native Attrs summarize_(long summarize_); } public StringFormat(@Const @ByRef Scope scope, @ByVal InputList inputs) { super((Pointer)null); allocate(scope, inputs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs); public StringFormat(@Const @ByRef Scope scope, @ByVal InputList inputs, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, inputs, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Template(@StringPiece BytePointer x); public static native @ByVal Attrs Template(@StringPiece String x); public static native @ByVal Attrs Placeholder(@StringPiece BytePointer x); public static native @ByVal Attrs Placeholder(@StringPiece String x); public static native @ByVal Attrs Summarize(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native StringFormat operation(Operation operation); public native @ByRef Output output(); public native StringFormat output(Output output); } /** Joins the strings in the given list of string tensors into one tensor; * * with the given separator (default is an empty separator). * * Arguments: * * scope: A Scope object * * inputs: A list of string tensors. The tensors must all have the same shape, * or be scalars. Scalars may be mixed in; these will be broadcast to the shape * of non-scalar inputs. * * Optional attributes (see {@code Attrs}): * * separator: string, an optional join separator. * * Returns: * * {@code Output}: The output tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class StringJoin extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringJoin(Pointer p) { super(p); } /** Optional attribute setters for StringJoin */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** string, an optional join separator. 
* * Defaults to "" */ public native @ByVal Attrs Separator(@StringPiece BytePointer x); public native @ByVal Attrs Separator(@StringPiece String x); public native @StringPiece BytePointer separator_(); public native Attrs separator_(BytePointer separator_); } public StringJoin(@Const @ByRef Scope scope, @ByVal InputList inputs) { super((Pointer)null); allocate(scope, inputs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs); public StringJoin(@Const @ByRef Scope scope, @ByVal InputList inputs, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, inputs, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal InputList inputs, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Separator(@StringPiece BytePointer x); public static native @ByVal Attrs Separator(@StringPiece String x); public native @ByRef Operation operation(); public native StringJoin operation(Operation operation); public native @ByRef Output output(); public native StringJoin output(Output output); } /** String lengths of {@code input}. * * Computes the length of each string given in the input tensor. * * Arguments: * * scope: A Scope object * * input: The string for which to compute the length. * * Optional attributes (see {@code Attrs}): * * unit: The unit that is counted to compute string length. One of: {@code "BYTE"} (for * the number of bytes in each string) or {@code "UTF8_CHAR"} (for the number of UTF-8 * encoded Unicode code points in each string). Results are undefined * if {@code unit=UTF8_CHAR} and the {@code input} strings do not contain structurally * valid UTF-8. * * Returns: * * {@code Output}: Integer tensor that has the same shape as {@code input}. The output contains the * element-wise string lengths of {@code input}. */ @Namespace("tensorflow::ops") @NoOffset public static class StringLength extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringLength(Pointer p) { super(p); } /** Optional attribute setters for StringLength */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** The unit that is counted to compute string length. One of: {@code "BYTE"} (for * the number of bytes in each string) or {@code "UTF8_CHAR"} (for the number of UTF-8 * encoded Unicode code points in each string). Results are undefined * if {@code unit=UTF8_CHAR} and the {@code input} strings do not contain structurally * valid UTF-8. 
* * Defaults to "BYTE" */ public native @ByVal Attrs Unit(@StringPiece BytePointer x); public native @ByVal Attrs Unit(@StringPiece String x); public native @StringPiece BytePointer unit_(); public native Attrs unit_(BytePointer unit_); } public StringLength(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public StringLength(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs Unit(@StringPiece BytePointer x); public static native @ByVal Attrs Unit(@StringPiece String x); public native @ByRef Operation operation(); public native StringLength operation(Operation operation); public native @ByRef Output output(); public native StringLength output(Output output); } /** Split elements of {@code input} based on {@code delimiter} into a {@code SparseTensor}. * * Let N be the size of source (typically N will be the batch size). Split each * element of {@code input} based on {@code delimiter} and return a {@code SparseTensor} * containing the splitted tokens. Empty tokens are ignored. * * {@code delimiter} can be empty, or a string of split characters. If {@code delimiter} is an * empty string, each element of {@code input} is split into individual single-byte * character strings, including splitting of UTF-8 multibyte sequences. Otherwise * every character of {@code delimiter} is a potential split point. * * For example: * N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output * will be * * indices = [0, 0; * 0, 1; * 1, 0; * 1, 1; * 1, 2] * shape = [2, 3] * values = ['hello', 'world', 'a', 'b', 'c'] * * Arguments: * * scope: A Scope object * * input: 1-D. Strings to split. * * delimiter: 0-D. Delimiter characters (bytes), or empty string. * * Optional attributes (see {@code Attrs}): * * skip_empty: A {@code bool}. If {@code True}, skip the empty strings from the result. * * Returns: * * {@code Output} indices: A dense matrix of int64 representing the indices of the sparse tensor. * * {@code Output} values: A vector of strings corresponding to the splited values. * * {@code Output} shape: a length-2 vector of int64 representing the shape of the sparse * tensor, where the first value is N and the second value is the maximum number * of tokens in a single input entry. */ @Namespace("tensorflow::ops") @NoOffset public static class StringSplit extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringSplit(Pointer p) { super(p); } /** Optional attribute setters for StringSplit */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** A {@code bool}. If {@code True}, skip the empty strings from the result. * * Defaults to true */ public native @ByVal Attrs SkipEmpty(@Cast("bool") boolean x); public native @Cast("bool") boolean skip_empty_(); public native Attrs skip_empty_(boolean skip_empty_); } public StringSplit(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input delimiter) { super((Pointer)null); allocate(scope, input, delimiter); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input delimiter); public StringSplit(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input delimiter, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, delimiter, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input delimiter, @Const @ByRef Attrs attrs); public static native @ByVal Attrs SkipEmpty(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native StringSplit operation(Operation operation); public native @ByRef Output indices(); public native StringSplit indices(Output indices); public native @ByRef Output values(); public native StringSplit values(Output values); public native @ByRef Output shape(); public native StringSplit shape(Output shape); } /** Split elements of {@code source} based on {@code sep} into a {@code SparseTensor}. * * Let N be the size of source (typically N will be the batch size). Split each * element of {@code source} based on {@code sep} and return a {@code SparseTensor} * containing the split tokens. Empty tokens are ignored. * * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', * then the output will be *
{@code
 *  st.indices = [0, 0;
 *                0, 1;
 *                1, 0;
 *                1, 1;
 *                1, 2]
 *  st.shape = [2, 3]
 *  st.values = ['hello', 'world', 'a', 'b', 'c']
 *  }
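 * 
 *  Expressed through these bindings, the three outputs of the op are read back
 *  separately. The following is an illustrative sketch only; it assumes
 *  {@code scope} is a valid {@code Scope} and {@code input} and {@code sep}
 *  are {@code Input} handles built elsewhere:
 * 
 *  {@code java
 *  StringSplitV2.Attrs attrs = new StringSplitV2.Attrs().Maxsplit(-1);  // -1 (the default) means no limit
 *  StringSplitV2 split = new StringSplitV2(scope, input, sep, attrs);
 *  Output indices = split.indices();  // sparse indices
 *  Output values = split.values();    // the split tokens
 *  Output shape = split.shape();      // dense shape of the SparseTensor
 *  }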
* * If {@code sep} is given, consecutive delimiters are not grouped together and are * deemed to delimit empty strings. For example, source of {@code "1<>2<><>3"} and * sep of {@code "<>"} returns {@code ["1", "2", "", "3"]}. If {@code sep} is None or an empty * string, consecutive whitespace characters are regarded as a single separator, and the * result will contain no empty strings at the start or end if the string has * leading or trailing whitespace. * * Note that the above-mentioned behavior matches python's str.split. * * Arguments: * * scope: A Scope object * * input: {@code 1-D} string {@code Tensor}, the strings to split. * * sep: {@code 0-D} string {@code Tensor}, the delimiter character. * * Optional attributes (see {@code Attrs}): * * maxsplit: An {@code int}. If {@code maxsplit > 0}, limits the number of splits in the result. * * Returns: * * {@code Output} indices * * {@code Output} values * * {@code Output} shape */ @Namespace("tensorflow::ops") @NoOffset public static class StringSplitV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringSplitV2(Pointer p) { super(p); } /** Optional attribute setters for StringSplitV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** An {@code int}. If {@code maxsplit > 0}, limits the number of splits in the result. * * Defaults to -1 */ public native @ByVal Attrs Maxsplit(@Cast("tensorflow::int64") long x); public native @Cast("tensorflow::int64") long maxsplit_(); public native Attrs maxsplit_(long maxsplit_); } public StringSplitV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input sep) { super((Pointer)null); allocate(scope, input, sep); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input sep); public StringSplitV2(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input sep, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, input, sep, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input sep, @Const @ByRef Attrs attrs); public static native @ByVal Attrs Maxsplit(@Cast("tensorflow::int64") long x); public native @ByRef Operation operation(); public native StringSplitV2 operation(Operation operation); public native @ByRef Output indices(); public native StringSplitV2 indices(Output indices); public native @ByRef Output values(); public native StringSplitV2 values(Output values); public native @ByRef Output shape(); public native StringSplitV2 shape(Output shape); } /** Strip leading and trailing whitespace from the Tensor. * * Arguments: * * scope: A Scope object * * input: A string {@code Tensor} of any shape. * * Returns: * * {@code Output}: A string {@code Tensor} of the same shape as the input. */ @Namespace("tensorflow::ops") @NoOffset public static class StringStrip extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public StringStrip(Pointer p) { super(p); } public StringStrip(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native StringStrip operation(Operation operation); public native @ByRef Output output(); public native StringStrip output(Output output); } /** Converts each string in the input Tensor to its hash modulo the number of buckets. * * The hash function is deterministic on the content of the string within the * process. * * Note that the hash function may change from time to time. * This functionality will be deprecated and it's recommended to use * {@code tf.string_to_hash_bucket_fast()} or {@code tf.string_to_hash_bucket_strong()}. * * Arguments: * * scope: A Scope object * * num_buckets: The number of buckets. * * Returns: * * {@code Output}: A Tensor of the same shape as the input {@code string_tensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class StringToHashBucket extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringToHashBucket(Pointer p) { super(p); } public StringToHashBucket(@Const @ByRef Scope scope, @ByVal Input string_tensor, @Cast("tensorflow::int64") long num_buckets) { super((Pointer)null); allocate(scope, string_tensor, num_buckets); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input string_tensor, @Cast("tensorflow::int64") long num_buckets); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native StringToHashBucket operation(Operation operation); public native @ByRef Output output(); public native StringToHashBucket output(Output output); } /** Converts each string in the input Tensor to its hash modulo the number of buckets. * * The hash function is deterministic on the content of the string within the * process and will never change. However, it is not suitable for cryptography. * This function may be used when CPU time is scarce and inputs are trusted or * unimportant. There is a risk of adversaries constructing inputs that all hash * to the same bucket. To prevent this problem, use a strong hash function with * {@code tf.string_to_hash_bucket_strong}. * * Arguments: * * scope: A Scope object * * input: The strings to assign a hash bucket. * * num_buckets: The number of buckets. * * Returns: * * {@code Output}: A Tensor of the same shape as the input {@code string_tensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class StringToHashBucketFast extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public StringToHashBucketFast(Pointer p) { super(p); } public StringToHashBucketFast(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets) { super((Pointer)null); allocate(scope, input, num_buckets); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native StringToHashBucketFast operation(Operation operation); public native @ByRef Output output(); public native StringToHashBucketFast output(Output output); } /** Converts each string in the input Tensor to its hash modulo the number of buckets. * * The hash function is deterministic on the content of the string within the * process. The hash function is a keyed hash function, where attribute {@code key} * defines the key of the hash function. {@code key} is an array of 2 elements. * * A strong hash is important when inputs may be malicious, e.g. URLs with * additional components. Adversaries could try to make their inputs hash to the * same bucket for a denial-of-service attack or to skew the results. A strong * hash prevents this by making it difficult, if not infeasible, to compute inputs * that hash to the same bucket. This comes at a cost of roughly 4x higher compute * time than {@code tf.string_to_hash_bucket_fast}. * * Arguments: * * scope: A Scope object * * input: The strings to assign a hash bucket. * * num_buckets: The number of buckets. * * key: The key for the keyed hash function passed as a list of two uint64 * elements. * * Returns: * * {@code Output}: A Tensor of the same shape as the input {@code string_tensor}. */ @Namespace("tensorflow::ops") @NoOffset public static class StringToHashBucketStrong extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringToHashBucketStrong(Pointer p) { super(p); } public StringToHashBucketStrong(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets, @ArraySlice IntPointer key) { super((Pointer)null); allocate(scope, input, num_buckets, key); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets, @ArraySlice IntPointer key); public StringToHashBucketStrong(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets, @ArraySlice IntBuffer key) { super((Pointer)null); allocate(scope, input, num_buckets, key); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets, @ArraySlice IntBuffer key); public StringToHashBucketStrong(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets, @ArraySlice int... key) { super((Pointer)null); allocate(scope, input, num_buckets, key); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @Cast("tensorflow::int64") long num_buckets, @ArraySlice int... 
key); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native StringToHashBucketStrong operation(Operation operation); public native @ByRef Output output(); public native StringToHashBucketStrong output(Output output); } /** Return substrings from {@code Tensor} of strings. * * For each string in the input {@code Tensor}, creates a substring starting at index * {@code pos} with a total length of {@code len}. * * If {@code len} defines a substring that would extend beyond the length of the input * string, then as many characters as possible are used. * * A negative {@code pos} indicates distance within the string backwards from the end. * * If {@code pos} specifies an index which is out of range for any of the input strings, * then an {@code InvalidArgumentError} is thrown. * * {@code pos} and {@code len} must have the same shape, otherwise a {@code ValueError} is thrown on * Op creation. * * *NOTE*: {@code Substr} supports broadcasting up to two dimensions. More about * broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * --- * * Examples * * Using scalar {@code pos} and {@code len}: * *
{@code python
 *  input = [b'Hello', b'World']
 *  position = 1
 *  length = 3
 * 
 *  output = [b'ell', b'orl']
 *  }
* * Using {@code pos} and {@code len} with same shape as {@code input}: * *
{@code python
 *  input = [[b'ten', b'eleven', b'twelve'],
 *           [b'thirteen', b'fourteen', b'fifteen'],
 *           [b'sixteen', b'seventeen', b'eighteen']]
 *  position = [[1, 2, 3],
 *              [1, 2, 3],
 *              [1, 2, 3]]
 *  length =   [[2, 3, 4],
 *              [4, 3, 2],
 *              [5, 5, 5]]
 * 
 *  output = [[b'en', b'eve', b'lve'],
 *            [b'hirt', b'urt', b'te'],
 *            [b'ixtee', b'vente', b'hteen']]
 *  }
* * Broadcasting {@code pos} and {@code len} onto {@code input}: * *
{@code
 *  input = [[b'ten', b'eleven', b'twelve'],
 *           [b'thirteen', b'fourteen', b'fifteen'],
 *           [b'sixteen', b'seventeen', b'eighteen'],
 *           [b'nineteen', b'twenty', b'twentyone']]
 *  position = [1, 2, 3]
 *  length =   [1, 2, 3]
 * 
 *  output = [[b'e', b'ev', b'lve'],
 *            [b'h', b'ur', b'tee'],
 *            [b'i', b've', b'hte'],
 *            [b'i', b'en', b'nty']]
 *  }
* * Broadcasting {@code input} onto {@code pos} and {@code len}: * *
{@code
 *  input = b'thirteen'
 *  position = [1, 5, 7]
 *  length =   [3, 2, 1]
 * 
 *  output = [b'hir', b'ee', b'n']
 *  }
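 * 
 *  A minimal sketch with these bindings (illustrative only; it assumes
 *  {@code scope} is a valid {@code Scope} and {@code input}, {@code pos}, and
 *  {@code len} are {@code Input} handles built elsewhere):
 * 
 *  {@code java
 *  // pos and len broadcast against input exactly as in the examples above.
 *  Substr substr = new Substr(scope, input, pos, len);
 *  Output out = substr.output();
 *  }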
* * Arguments: * * scope: A Scope object * * input: Tensor of strings * * pos: Scalar defining the position of first character in each substring * * len: Scalar defining the number of characters to include in each substring * * Returns: * * {@code Output}: Tensor of substrings */ @Namespace("tensorflow::ops") @NoOffset public static class Substr extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Substr(Pointer p) { super(p); } public Substr(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pos, @ByVal Input len) { super((Pointer)null); allocate(scope, input, pos, len); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input, @ByVal Input pos, @ByVal Input len); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Substr operation(Operation operation); public native @ByRef Output output(); public native Substr output(Output output); } /** Determine the script codes of a given tensor of Unicode integer code points. * * This operation converts Unicode code points to script codes corresponding to * each code point. Script codes correspond to International Components for * Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html. * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will * match input shape. * * Arguments: * * scope: A Scope object * * input: A Tensor of int32 Unicode code points. * * Returns: * * {@code Output}: A Tensor of int32 script codes corresponding to each input code point. */ @Namespace("tensorflow::ops") @NoOffset public static class UnicodeScript extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnicodeScript(Pointer p) { super(p); } public UnicodeScript(@Const @ByRef Scope scope, @ByVal Input input) { super((Pointer)null); allocate(scope, input); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input input); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native UnicodeScript operation(Operation operation); public native @ByRef Output output(); public native UnicodeScript output(Output output); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_STRING_OPS_H_ // Parsed from tensorflow/cc/ops/training_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_TRAINING_OPS_H_ // #define TENSORFLOW_CC_OPS_TRAINING_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup training_ops Training Ops * \{

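 * 
 *  The ops in this group share one builder pattern: construct the op from a
 *  Scope, the Input operands, and optional Attrs, then read the updated
 *  variable back via {@code out()}. An illustrative sketch for the Adadelta
 *  op documented next (not generated code; it assumes {@code scope} and the
 *  {@code Input} handles {@code var}, {@code accum}, {@code accumUpdate},
 *  {@code lr}, {@code rho}, {@code epsilon}, and {@code grad} exist elsewhere):
 * 
 *  {@code java
 *  ApplyAdadelta step = new ApplyAdadelta(scope, var, accum, accumUpdate,
 *      lr, rho, epsilon, grad, new ApplyAdadelta.Attrs().UseLocking(true));
 *  Output updated = step.out();
 *  }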
* Update '*var' according to the adadelta scheme. * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * accum_update: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay factor. Must be a scalar. * * epsilon: Constant factor. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var, accum and update_accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyAdadelta extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyAdadelta(Pointer p) { super(p); } /** Optional attribute setters for ApplyAdadelta */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var, accum and update_accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad); public ApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyAdadelta operation(Operation operation); public native @ByRef Output out(); public native ApplyAdadelta out(Output out); } /** Update '*var' according to the adagrad scheme. * * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyAdagrad(Pointer p) { super(p); } /** Optional attribute setters for ApplyAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** Defaults to true */ public native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean update_slots_(); public native Attrs update_slots_(boolean update_slots_); } public ApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, accum, lr, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad); public ApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyAdagrad operation(Operation operation); public native @ByRef Output out(); public native ApplyAdagrad out(Output out); } /** Update '*var' according to the proximal adagrad scheme. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * gradient_accumulator: Should be from a Variable(). * * gradient_squared_accumulator: Should be from a Variable(). * * grad: The gradient. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * global_step: Training step number. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyAdagradDA extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyAdagradDA(Pointer p) { super(p); } /** Optional attribute setters for ApplyAdagradDA */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step); public ApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyAdagradDA operation(Operation operation); public native @ByRef Output out(); public native ApplyAdagradDA out(Output out); } /** Update '*var' according to the Adam algorithm. * * $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * m: Should be from a Variable(). * * v: Should be from a Variable(). * * beta1_power: Must be a scalar. * * beta2_power: Must be a scalar. * * lr: Scaling factor. Must be a scalar. * * beta1: Momentum factor. Must be a scalar. * * beta2: Momentum factor. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * use_nesterov: If {@code True}, uses the nesterov update. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyAdam extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyAdam(Pointer p) { super(p); } /** Optional attribute setters for ApplyAdam */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ /// public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** If {@code True}, uses the nesterov update. * * Defaults to false */ public native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean use_nesterov_(); public native Attrs use_nesterov_(boolean use_nesterov_); } public ApplyAdam(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad); public ApplyAdam(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyAdam operation(Operation operation); public native @ByRef Output out(); public native ApplyAdam out(Output out); } /** Update '*var' according to the AddSign update. * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * m: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * alpha: Must be a scalar. * * sign_decay: Must be a scalar. * * beta: Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". 
*/ @Namespace("tensorflow::ops") @NoOffset public static class ApplyAddSign extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyAddSign(Pointer p) { super(p); } /** Optional attribute setters for ApplyAddSign */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyAddSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, m, lr, alpha, sign_decay, beta, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad); public ApplyAddSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, m, lr, alpha, sign_decay, beta, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyAddSign operation(Operation operation); public native @ByRef Output out(); public native ApplyAddSign out(Output out); } /** Update '*var' according to the centered RMSProp algorithm. * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
* * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * mg: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyCenteredRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyCenteredRMSProp(Pointer p) { super(p); } /** Optional attribute setters for ApplyCenteredRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad); public ApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyCenteredRMSProp operation(Operation operation); public native @ByRef Output out(); public native ApplyCenteredRMSProp out(Output out); } /** Update '*var' according to the Ftrl-proximal scheme. * * accum_new = accum + grad * grad * linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyFtrl extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyFtrl(Pointer p) { super(p); } /** Optional attribute setters for ApplyFtrl */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
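 *
 *  <p>Note that {@code Attrs} is a plain, chainable value object; a hypothetical
 *  FTRL setup might look like (assuming {@code scope} and the {@code Input}s exist):
 *  <pre>{@code
 *  ApplyFtrl.Attrs locked = new ApplyFtrl.Attrs().UseLocking(true);
 *  new ApplyFtrl(scope, var, accum, linear, grad, lr, l1, l2, lr_power, locked);
 *  }</pre>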
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power); public ApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyFtrl operation(Operation operation); public native @ByRef Output out(); public native ApplyFtrl out(Output out); } /** Update '*var' according to the Ftrl-proximal scheme. * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 shrinkage regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyFtrlV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyFtrlV2(Pointer p) { super(p); } /** Optional attribute setters for ApplyFtrlV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor.
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power); public ApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyFtrlV2 operation(Operation operation); public native @ByRef Output out(); public native ApplyFtrlV2 out(Output out); } /** Update '*var' by subtracting 'alpha' * 'delta' from it. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * alpha: Scaling factor. Must be a scalar. * * delta: The change. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyGradientDescent extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyGradientDescent(Pointer p) { super(p); } /** Optional attribute setters for ApplyGradientDescent */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
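 *
 *  <p>For reference, a plain SGD step with this op could be sketched as follows
 *  (assuming {@code scope}, {@code var}, {@code alpha}, and {@code delta} were
 *  built elsewhere):
 *  <pre>{@code
 *  ApplyGradientDescent sgd = new ApplyGradientDescent(scope, var, alpha, delta);
 *  Output updated = sgd.out();   // "var" after var -= alpha * delta
 *  }</pre>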
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta) { super((Pointer)null); allocate(scope, var, alpha, delta); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta); public ApplyGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, alpha, delta, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyGradientDescent operation(Operation operation); public native @ByRef Output out(); public native ApplyGradientDescent out(Output out); } /** Update '*var' according to the momentum scheme. Set use_nesterov = True if you * * want to use Nesterov momentum. * * accum = accum * momentum + grad * var -= lr * accum * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * grad: The gradient. * * momentum: Momentum. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * use_nesterov: If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyMomentum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyMomentum(Pointer p) { super(p); } /** Optional attribute setters for ApplyMomentum */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
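 *
 *  <p>A sketch of a Nesterov-momentum step (inputs assumed to exist; the static
 *  shortcut below returns an {@code Attrs}):
 *  <pre>{@code
 *  new ApplyMomentum(scope, var, accum, lr, grad, momentum,
 *      ApplyMomentum.UseNesterov(true));
 *  }</pre>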
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ /// public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Defaults to false */ public native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean use_nesterov_(); public native Attrs use_nesterov_(boolean use_nesterov_); } public ApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum) { super((Pointer)null); allocate(scope, var, accum, lr, grad, momentum); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum); public ApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, momentum, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyMomentum operation(Operation operation); public native @ByRef Output out(); public native ApplyMomentum out(Output out); } /** Update '*var' according to the PowerSign update. * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * m: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * logbase: Must be a scalar. * * sign_decay: Must be a scalar. * * beta: Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyPowerSign extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyPowerSign(Pointer p) { super(p); } /** Optional attribute setters for ApplyPowerSign */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}.
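 *
 *  <p>A PowerSign step sketch (inputs assumed to exist; with base e,
 *  {@code logbase} is typically the scalar 1):
 *  <pre>{@code
 *  ApplyPowerSign step = new ApplyPowerSign(scope, var, m, lr, logbase,
 *      sign_decay, beta, grad);
 *  Output updated = step.out();
 *  }</pre>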
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyPowerSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, m, lr, logbase, sign_decay, beta, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad); public ApplyPowerSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, m, lr, logbase, sign_decay, beta, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyPowerSign operation(Operation operation); public native @ByRef Output out(); public native ApplyPowerSign out(Output out); } /** Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. * * accum += grad * grad * prox_v = var - lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyProximalAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyProximalAdagrad(Pointer p) { super(p); } /** Optional attribute setters for ApplyProximalAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad); public ApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyProximalAdagrad operation(Operation operation); public native @ByRef Output out(); public native ApplyProximalAdagrad out(Output out); } /** Update '*var' as FOBOS algorithm with fixed learning rate. * * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * alpha: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * delta: The change. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyProximalGradientDescent extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyProximalGradientDescent(Pointer p) { super(p); } /** Optional attribute setters for ApplyProximalGradientDescent */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, delta); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta); public ApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, delta, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyProximalGradientDescent operation(Operation operation); public native @ByRef Output out(); public native ApplyProximalGradientDescent out(Output out); } /** Update '*var' according to the RMSProp algorithm. * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class ApplyRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ApplyRMSProp(Pointer p) { super(p); } /** Optional attribute setters for ApplyRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad); public ApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ApplyRMSProp operation(Operation operation); public native @ByRef Output out(); public native ApplyRMSProp out(Output out); } /** Update '*var' according to the adadelta scheme. * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * accum_update: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay factor. Must be a scalar. * * epsilon: Constant factor. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var, accum and update_accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyAdadelta extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyAdadelta(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyAdadelta */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
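 *
 *  <p>Unlike the Apply* ops above, the Resource* variants yield an
 *  {@code Operation} rather than an {@code Output}; a sketch (inputs assumed):
 *  <pre>{@code
 *  ResourceApplyAdadelta step = new ResourceApplyAdadelta(scope, var, accum,
 *      accum_update, lr, rho, epsilon, grad);
 *  Operation op = step.asOperation();   // run this op to apply the update
 *  }</pre>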
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var, accum and update_accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad); public ResourceApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyAdadelta operation(Operation operation); } /** Update '*var' according to the adagrad scheme. * * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyAdagrad(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
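 *
 *  <p>Both attributes can be combined in one {@code Attrs} chain, for example
 *  (a sketch, inputs assumed):
 *  <pre>{@code
 *  new ResourceApplyAdagrad(scope, var, accum, lr, grad,
 *      new ResourceApplyAdagrad.Attrs().UseLocking(true).UpdateSlots(false));
 *  }</pre>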
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** Defaults to true */ public native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean update_slots_(); public native Attrs update_slots_(boolean update_slots_); } public ResourceApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, accum, lr, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad); public ResourceApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyAdagrad operation(Operation operation); } /** Update '*var' according to the proximal adagrad scheme. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * gradient_accumulator: Should be from a Variable(). * * gradient_squared_accumulator: Should be from a Variable(). * * grad: The gradient. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * global_step: Training step number. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyAdagradDA extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyAdagradDA(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyAdagradDA */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step); public ResourceApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyAdagradDA operation(Operation operation); } /** Update '*var' according to the Adam algorithm. * * $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * m: Should be from a Variable(). * * v: Should be from a Variable(). * * beta1_power: Must be a scalar. * * beta2_power: Must be a scalar. * * lr: Scaling factor. Must be a scalar. * * beta1: Momentum factor. Must be a scalar. * * beta2: Momentum factor. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * use_nesterov: If {@code True}, uses the nesterov update. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyAdam extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
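 *
 *  <p>An Adam update sketch (all {@code Input}s assumed created elsewhere;
 *  {@code beta1_power} and {@code beta2_power} hold beta1^t and beta2^t):
 *  <pre>{@code
 *  ResourceApplyAdam adam = new ResourceApplyAdam(scope, var, m, v,
 *      beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad);
 *  Operation op = adam.asOperation();
 *  }</pre>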
*/ public ResourceApplyAdam(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyAdam */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ /// public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** If {@code True}, uses the nesterov update. * * Defaults to false */ public native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean use_nesterov_(); public native Attrs use_nesterov_(boolean use_nesterov_); } public ResourceApplyAdam(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad); public ResourceApplyAdam(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input v, @ByVal Input beta1_power, @ByVal Input beta2_power, @ByVal Input lr, @ByVal Input beta1, @ByVal Input beta2, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyAdam operation(Operation operation); } /** Update '*var' according to the AddSign update. * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) * sign(m)) * g * variable <- variable - lr_t * update * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * m: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * alpha: Must be a scalar. * * sign_decay: Must be a scalar. * * beta: Must be a scalar. * * grad: The gradient.
* * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyAddSign extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyAddSign(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyAddSign */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyAddSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, m, lr, alpha, sign_decay, beta, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad); public ResourceApplyAddSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, m, lr, alpha, sign_decay, beta, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input alpha, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyAddSign operation(Operation operation); } /** Update '*var' according to the centered RMSProp algorithm. * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
* * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * mg: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyCenteredRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyCenteredRMSProp(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyCenteredRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad); public ResourceApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyCenteredRMSProp operation(Operation operation); } /** Update '*var' according to the Ftrl-proximal scheme. * * accum_new = accum + grad * grad * linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyFtrl extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyFtrl(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyFtrl */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power); public ResourceApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyFtrl operation(Operation operation); } /** Update '*var' according to the Ftrl-proximal scheme. * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 shrinkage regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyFtrlV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyFtrlV2(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyFtrlV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power); public ResourceApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyFtrlV2 operation(Operation operation); } /** Update '*var' by subtracting 'alpha' * 'delta' from it. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * alpha: Scaling factor. Must be a scalar. * * delta: The change. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyGradientDescent extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyGradientDescent(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyGradientDescent */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
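 *
 *  <p>Resource-variable SGD sketch (inputs assumed to exist):
 *  <pre>{@code
 *  new ResourceApplyGradientDescent(scope, var, alpha, delta,
 *      ResourceApplyGradientDescent.UseLocking(true)).asOperation();
 *  }</pre>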
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta) { super((Pointer)null); allocate(scope, var, alpha, delta); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta); public ResourceApplyGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, alpha, delta, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input delta, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyGradientDescent operation(Operation operation); } /** Update '*var' according to the momentum scheme. * * Set use_nesterov = True if you want to use Nesterov momentum. * * accum = accum * momentum + grad * var -= lr * accum * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * grad: The gradient. * * momentum: Momentum. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * use_nesterov: If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyMomentum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyMomentum(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyMomentum */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ /// public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Defaults to false */ public native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean use_nesterov_(); public native Attrs use_nesterov_(boolean use_nesterov_); } public ResourceApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum) { super((Pointer)null); allocate(scope, var, accum, lr, grad, momentum); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum); public ResourceApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, momentum, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input momentum, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyMomentum operation(Operation operation); } /** Update '*var' according to the PowerSign update. * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * m: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * logbase: Must be a scalar. * * sign_decay: Must be a scalar. * * beta: Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyPowerSign extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyPowerSign(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyPowerSign */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyPowerSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, m, lr, logbase, sign_decay, beta, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad); public ResourceApplyPowerSign(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, m, lr, logbase, sign_decay, beta, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input m, @ByVal Input lr, @ByVal Input logbase, @ByVal Input sign_decay, @ByVal Input beta, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyPowerSign operation(Operation operation); } /** Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. * * accum += grad * grad * prox_v = var - lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyProximalAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyProximalAdagrad(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyProximalAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad); public ResourceApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyProximalAdagrad operation(Operation operation); } /** Update '*var' as FOBOS algorithm with fixed learning rate. * * prox_v = var - alpha * delta * var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * alpha: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * delta: The change. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyProximalGradientDescent extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyProximalGradientDescent(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyProximalGradientDescent */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, delta); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta); public ResourceApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, delta, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input delta, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyProximalGradientDescent operation(Operation operation); } /** Update '*var' according to the RMSProp algorithm. * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceApplyRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceApplyRMSProp(Pointer p) { super(p); } /** Optional attribute setters for ResourceApplyRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad); public ResourceApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceApplyRMSProp operation(Operation operation); } /** Update relevant entries in '*var' and '*accum' according to the adadelta scheme. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * accum_update: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * rho: Decay factor. Must be a scalar. * * epsilon: Constant factor. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyAdadelta extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyAdadelta(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyAdadelta */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices); public ResourceSparseApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyAdadelta operation(Operation operation); } /** Update relevant entries in '*var' and '*accum' according to the adagrad scheme. * * That is for rows we have grad for, we update var and accum as follows: * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyAdagrad(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** Defaults to true */ public native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean update_slots_(); public native Attrs update_slots_(boolean update_slots_); } public ResourceSparseApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices); public ResourceSparseApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyAdagrad operation(Operation operation); } /** Update entries in '*var' and '*accum' according to the proximal adagrad scheme. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * gradient_accumulator: Should be from a Variable(). * * gradient_squared_accumulator: Should be from a Variable(). * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * lr: Learning rate. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * global_step: Training step number. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyAdagradDA extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyAdagradDA(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyAdagradDA */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step); public ResourceSparseApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyAdagradDA operation(Operation operation); } /** Update '*var' according to the centered RMSProp algorithm. * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * mg: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var, ms and mom. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
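 *
 * <p>Hedged usage sketch, illustrative only; {@code scope} and the Input
 * arguments are assumed to be defined elsewhere:
 * <pre>{@code
 * new ResourceSparseApplyCenteredRMSProp(
 *     scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices);
 * }</pre>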
* * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyCenteredRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyCenteredRMSProp(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyCenteredRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices); public ResourceSparseApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyCenteredRMSProp operation(Operation operation); } /** Update relevant entries in '*var' according to the Ftrl-proximal scheme. * * That is for rows we have grad for, we update var, accum and linear as follows: * accum_new = accum + grad * grad * linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. 
* * indices: A vector of indices into the first dimension of var and accum. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyFtrl extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyFtrl(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyFtrl */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power); public ResourceSparseApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyFtrl operation(Operation operation); } /** Update relevant entries in '*var' according to the Ftrl-proximal scheme. 
* * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 shrinkage regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyFtrlV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyFtrlV2(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyFtrlV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power); public ResourceSparseApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyFtrlV2 operation(Operation operation); } /** Update relevant entries in '*var' and '*accum' according to the momentum scheme. * * Set use_nesterov = True if you want to use Nesterov momentum. * * That is for rows we have grad for, we update var and accum as follows: * * accum = accum * momentum + grad * var -= lr * accum * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * momentum: Momentum. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * use_nesterov: If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyMomentum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyMomentum(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyMomentum */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
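 *
 * <p>Illustrative sketch, not generated code: the two optional attributes of
 * the enclosing ResourceSparseApplyMomentum op can be combined by chaining,
 * e.g.
 * <pre>{@code
 * ResourceSparseApplyMomentum.Attrs attrs =
 *     new ResourceSparseApplyMomentum.Attrs().use_locking_(true).use_nesterov_(true);
 * }</pre>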
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ /// public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Defaults to false */ public native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean use_nesterov_(); public native Attrs use_nesterov_(boolean use_nesterov_); } public ResourceSparseApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices, momentum); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum); public ResourceSparseApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices, momentum, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyMomentum operation(Operation operation); } /** Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. * * That is for rows we have grad for, we update var and accum as follows: * accum += grad * grad * prox_v = var * prox_v -= lr * grad * (1 / sqrt(accum)) * var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyProximalAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
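 *
 * <p>Hedged usage sketch, illustrative only; {@code scope} and the Input
 * arguments are assumed to exist elsewhere:
 * <pre>{@code
 * new ResourceSparseApplyProximalAdagrad(scope, var, accum, lr, l1, l2, grad,
 *     indices, ResourceSparseApplyProximalAdagrad.UseLocking(true));
 * }</pre>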
*/ public ResourceSparseApplyProximalAdagrad(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyProximalAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices); public ResourceSparseApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyProximalAdagrad operation(Operation operation); } /** Sparse update '*var' as FOBOS algorithm with fixed learning rate. * * That is for rows we have grad for, we update var as follows: * prox_v = var - alpha * grad * var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * alpha: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyProximalGradientDescent extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
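 *
 * <p>Hedged usage sketch, illustrative only; the Scope and Input arguments
 * are assumed to exist elsewhere:
 * <pre>{@code
 * ResourceSparseApplyProximalGradientDescent op =
 *     new ResourceSparseApplyProximalGradientDescent(scope, var, alpha, l1, l2, grad, indices);
 * Operation sparseFobos = op.asOperation();
 * }</pre>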
*/ public ResourceSparseApplyProximalGradientDescent(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyProximalGradientDescent */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices); public ResourceSparseApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyProximalGradientDescent operation(Operation operation); } /** Update '*var' according to the RMSProp algorithm. * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var, ms and mom. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
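 *
 * <p>Hedged usage sketch, illustrative only; the Scope and Input arguments
 * are assumed to be defined elsewhere:
 * <pre>{@code
 * new ResourceSparseApplyRMSProp(
 *     scope, var, ms, mom, lr, rho, momentum, epsilon, grad, indices,
 *     ResourceSparseApplyRMSProp.UseLocking(true));
 * }</pre>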
* * Returns: * * the created {@code Operation} */ @Namespace("tensorflow::ops") @NoOffset public static class ResourceSparseApplyRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ResourceSparseApplyRMSProp(Pointer p) { super(p); } /** Optional attribute setters for ResourceSparseApplyRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public ResourceSparseApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices); public ResourceSparseApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Operation") Operation asOperation(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native ResourceSparseApplyRMSProp operation(Operation operation); } /** Update relevant entries in '*var' and '*accum' according to the adadelta scheme. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * accum_update: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * rho: Decay factor. Must be a scalar. * * epsilon: Constant factor. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". 
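 *
 * <p>Hedged usage sketch, illustrative only; the Scope and Input arguments
 * are assumed to be defined elsewhere. Unlike the Resource* ops above, this
 * op also exposes its result as an Output:
 * <pre>{@code
 * SparseApplyAdadelta op = new SparseApplyAdadelta(
 *     scope, var, accum, accum_update, lr, rho, epsilon, grad, indices);
 * Output updated = op.out(); // same as "var"
 * }</pre>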
*/ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyAdadelta extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyAdadelta(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyAdadelta */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices); public SparseApplyAdadelta(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, accum_update, lr, rho, epsilon, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input accum_update, @ByVal Input lr, @ByVal Input rho, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyAdadelta operation(Operation operation); public native @ByRef Output out(); public native SparseApplyAdadelta out(Output out); } /** Update relevant entries in '*var' and '*accum' according to the adagrad scheme. * * That is for rows we have grad for, we update var and accum as follows: * $$accum += grad * grad$$ * $$var -= lr * grad * (1 / sqrt(accum))$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
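 *
 * <p>Hedged usage sketch, illustrative only; the Scope and Input arguments
 * are assumed to be defined elsewhere. Both optional attributes can be set on
 * a single Attrs by chaining:
 * <pre>{@code
 * SparseApplyAdagrad op = new SparseApplyAdagrad(scope, var, accum, lr, grad,
 *     indices, new SparseApplyAdagrad.Attrs().use_locking_(true).update_slots_(false));
 * Output updated = op.out();
 * }</pre>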
* * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyAdagrad(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** Defaults to true */ public native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean update_slots_(); public native Attrs update_slots_(boolean update_slots_); } public SparseApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices); public SparseApplyAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UpdateSlots(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyAdagrad operation(Operation operation); public native @ByRef Output out(); public native SparseApplyAdagrad out(Output out); } /** Update entries in '*var' and '*accum' according to the proximal adagrad scheme. * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * gradient_accumulator: Should be from a Variable(). * * gradient_squared_accumulator: Should be from a Variable(). * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * lr: Learning rate. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * global_step: Training step number. Must be a scalar. 
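 *
 * <p>A minimal construction sketch (illustrative only; the optional attributes
 * described next can be appended as a final {@code Attrs} argument):
 * <pre>{@code
 * SparseApplyAdagradDA update = new SparseApplyAdagradDA(scope, var,
 *         gradient_accumulator, gradient_squared_accumulator, grad, indices,
 *         lr, l1, l2, global_step);
 * Output updated = update.asOutput(); // same underlying tensor as "var"
 * }</pre>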
* * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyAdagradDA extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyAdagradDA(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyAdagradDA */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step); public SparseApplyAdagradDA(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input gradient_accumulator, @ByVal Input gradient_squared_accumulator, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input global_step, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyAdagradDA operation(Operation operation); public native @ByRef Output out(); public native SparseApplyAdagradDA out(Output out); } /** Update '*var' according to the centered RMSProp algorithm. 
* * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. * * Note that in the dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * mg: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var, ms and mom. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyCenteredRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyCenteredRMSProp(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyCenteredRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention.
* * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices); public SparseApplyCenteredRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input mg, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyCenteredRMSProp operation(Operation operation); public native @ByRef Output out(); public native SparseApplyCenteredRMSProp out(Output out); } /** Update relevant entries in '*var' according to the Ftrl-proximal scheme. * * That is for rows we have grad for, we update var, accum and linear as follows: * $$accum_{new} = accum + grad * grad$$ * $$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$ * $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$ * $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$ * $$accum = accum_{new}$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyFtrl extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyFtrl(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyFtrl */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor.
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power); public SparseApplyFtrl(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyFtrl operation(Operation operation); public native @ByRef Output out(); public native SparseApplyFtrl out(Output out); } /** Update relevant entries in '*var' according to the Ftrl-proximal scheme. * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * linear: Should be from a Variable(). * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * lr: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 shrinkage regularization. Must be a scalar. * * lr_power: Scaling factor. Must be a scalar.
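 *
 * <p>A minimal construction sketch (illustrative only; the optional attributes
 * described next can be appended as a final {@code Attrs} argument):
 * <pre>{@code
 * SparseApplyFtrlV2 update = new SparseApplyFtrlV2(scope, var, accum, linear,
 *         grad, indices, lr, l1, l2, l2_shrinkage, lr_power);
 * Output updated = update.asOutput(); // same underlying tensor as "var"
 * }</pre>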
* * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyFtrlV2 extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyFtrlV2(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyFtrlV2 */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power); public SparseApplyFtrlV2(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input linear, @ByVal Input grad, @ByVal Input indices, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input l2_shrinkage, @ByVal Input lr_power, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyFtrlV2 operation(Operation operation); public native @ByRef Output out(); public native SparseApplyFtrlV2 out(Output out); } /** Update relevant entries in '*var' and '*accum' according to the momentum scheme. * * Set use_nesterov = True if you want to use Nesterov momentum. 
* * That is for rows we have grad for, we update var and accum as follows: * * $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * momentum: Momentum. Must be a scalar. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * use_nesterov: If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyMomentum extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyMomentum(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyMomentum */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ /// public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); /** If {@code True}, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. 
* * Defaults to false */ public native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); public native @Cast("bool") boolean use_nesterov_(); public native Attrs use_nesterov_(boolean use_nesterov_); } public SparseApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices, momentum); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum); public SparseApplyMomentum(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, grad, indices, momentum, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input grad, @ByVal Input indices, @ByVal Input momentum, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public static native @ByVal Attrs UseNesterov(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyMomentum operation(Operation operation); public native @ByRef Output out(); public native SparseApplyMomentum out(Output out); } /** Sparse update entries in '*var' and '*accum' according to the FOBOS algorithm. * * That is for rows we have grad for, we update var and accum as follows: * $$accum += grad * grad$$ * $$prox_v = var$$ * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ * $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * accum: Should be from a Variable(). * * lr: Learning rate. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyProximalAdagrad extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyProximalAdagrad(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyProximalAdagrad */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices); public SparseApplyProximalAdagrad(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, accum, lr, l1, l2, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input accum, @ByVal Input lr, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyProximalAdagrad operation(Operation operation); public native @ByRef Output out(); public native SparseApplyProximalAdagrad out(Output out); } /** Sparse update of '*var' using the FOBOS algorithm with a fixed learning rate. * * That is for rows we have grad for, we update var as follows: * $$prox_v = var - alpha * grad$$ * $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * alpha: Scaling factor. Must be a scalar. * * l1: L1 regularization. Must be a scalar. * * l2: L2 regularization. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var and accum. * * Optional attributes (see {@code Attrs}): * * use_locking: If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyProximalGradientDescent extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyProximalGradientDescent(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyProximalGradientDescent */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor. */ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices); public SparseApplyProximalGradientDescent(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, alpha, l1, l2, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input alpha, @ByVal Input l1, @ByVal Input l2, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyProximalGradientDescent operation(Operation operation); public native @ByRef Output out(); public native SparseApplyProximalGradientDescent out(Output out); } /** Update '*var' according to the RMSProp algorithm. * * Note that in the dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ * * Arguments: * * scope: A Scope object * * var: Should be from a Variable(). * * ms: Should be from a Variable(). * * mom: Should be from a Variable(). * * lr: Scaling factor. Must be a scalar. * * rho: Decay rate. Must be a scalar. * * epsilon: Ridge term. Must be a scalar. * * grad: The gradient. * * indices: A vector of indices into the first dimension of var, ms and mom. * * Optional attributes (see {@code Attrs}): * * use_locking: If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Returns: * * {@code Output}: Same as "var". */ @Namespace("tensorflow::ops") @NoOffset public static class SparseApplyRMSProp extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SparseApplyRMSProp(Pointer p) { super(p); } /** Optional attribute setters for SparseApplyRMSProp */ public static class Attrs extends Pointer { static { Loader.load(); } /** Default native constructor.
*/ public Attrs() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public Attrs(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Attrs(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); @Override public Attrs position(long position) { return (Attrs)super.position(position); } /** If {@code True}, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * * Defaults to false */ public native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @Cast("bool") boolean use_locking_(); public native Attrs use_locking_(boolean use_locking_); } public SparseApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad, indices); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices); public SparseApplyRMSProp(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs) { super((Pointer)null); allocate(scope, var, ms, mom, lr, rho, momentum, epsilon, grad, indices, attrs); } private native void allocate(@Const @ByRef Scope scope, @ByVal Input var, @ByVal Input ms, @ByVal Input mom, @ByVal Input lr, @ByVal Input rho, @ByVal Input momentum, @ByVal Input epsilon, @ByVal Input grad, @ByVal Input indices, @Const @ByRef Attrs attrs); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public static native @ByVal Attrs UseLocking(@Cast("bool") boolean x); public native @ByRef Operation operation(); public native SparseApplyRMSProp operation(Operation operation); public native @ByRef Output out(); public native SparseApplyRMSProp out(Output out); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_TRAINING_OPS_H_ // Parsed from tensorflow/cc/ops/user_ops.h // This file is MACHINE GENERATED! Do not edit. // #ifndef TENSORFLOW_CC_OPS_USER_OPS_H_ // #define TENSORFLOW_CC_OPS_USER_OPS_H_ // This file is MACHINE GENERATED! Do not edit. // #include "tensorflow/cc/framework/ops.h" // #include "tensorflow/cc/framework/scope.h" // #include "tensorflow/core/framework/tensor.h" // #include "tensorflow/core/framework/tensor_shape.h" // #include "tensorflow/core/framework/types.h" // #include "tensorflow/core/lib/gtl/array_slice.h" /** \defgroup user_ops User Ops * \{

* Output a fact about factorials. * * Arguments: * * scope: A Scope object * * Returns: * * {@code Output}: The fact tensor. */ @Namespace("tensorflow::ops") @NoOffset public static class Fact extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Fact(Pointer p) { super(p); } public Fact(@Const @ByRef Scope scope) { super((Pointer)null); allocate(scope); } private native void allocate(@Const @ByRef Scope scope); public native @ByVal @Name("operator tensorflow::Output") Output asOutput(); public native @ByVal @Name("operator tensorflow::Input") Input asInput(); public native Node node(); public native @ByRef Operation operation(); public native Fact operation(Operation operation); public native @ByRef Output fact(); public native Fact fact(Output fact); } /** \} */ // namespace ops // namespace tensorflow // #endif // TENSORFLOW_CC_OPS_USER_OPS_H_ }
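// A self-contained sketch showing how the generated op wrappers above can be
// driven end to end: it builds a one-op graph around Fact and runs it in a
// session, following the session-run pattern of the JavaCPP presets samples.
// Illustrative only: the class name, the "fact" node name, and the assumption
// that std::string results map to BytePointer (hence getString()) are not part
// of the generated bindings.
class FactExample {
    static void checkOk(tensorflow.Status s) {
        // Surface native-side failures as Java exceptions.
        if (!s.ok()) {
            throw new RuntimeException(s.error_message().getString());
        }
    }

    public static void main(String[] args) {
        // Platform-specific initialization, as in the presets samples.
        tensorflow.InitMain("fact_example", (int[])null, null);

        // Build a graph containing a single Fact node named "fact".
        tensorflow.Scope scope = tensorflow.Scope.NewRootScope();
        new tensorflow.Fact(scope.WithOpName("fact"));

        // Serialize the graph and create a session for it.
        tensorflow.GraphDef def = new tensorflow.GraphDef();
        checkOk(scope.ToGraphDef(def));
        tensorflow.Session session = new tensorflow.Session(new tensorflow.SessionOptions());
        checkOk(session.Create(def));

        // Fetch the op's single string output and print a debug summary.
        tensorflow.TensorVector outputs = new tensorflow.TensorVector();
        checkOk(session.Run(new tensorflow.StringTensorPairVector(),
                new tensorflow.StringVector("fact:0"),
                new tensorflow.StringVector(), outputs));
        System.out.println(outputs.get(0).DebugString().getString());
    }
}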




