// Targeted by JavaCPP version 1.2: DO NOT EDIT THIS FILE
package org.bytedeco.javacpp;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
public class tensorflow extends org.bytedeco.javacpp.helper.tensorflow {
static { Loader.load(); }
@Name("tensorflow::gtl::InlinedVector") public static class LongVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LongVector(Pointer p) { super(p); }
public LongVector() { allocate(); }
private native void allocate();
public native @Name("operator=") @ByRef LongVector put(@ByRef LongVector x);
public native long size();
@Index public native @Cast("tensorflow::int64") long get(@Cast("size_t") long i);
public native LongVector put(@Cast("size_t") long i, long value);
}
@Name("tensorflow::gtl::InlinedVector") public static class DataTypeVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public DataTypeVector(Pointer p) { super(p); }
public DataTypeVector() { allocate(); }
private native void allocate();
public native @Name("operator=") @ByRef DataTypeVector put(@ByRef DataTypeVector x);
public native long size();
@Index public native @Cast("tensorflow::DataType") int get(@Cast("size_t") long i);
public native DataTypeVector put(@Cast("size_t") long i, int value);
}
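// Illustrative sketch, not part of the generated bindings: copies the entries
// of a LongVector (e.g. a dimension list returned by a native call) into a
// plain Java array using only the size()/get() accessors declared above.
private static long[] exampleDims(LongVector dims) {
long[] out = new long[(int)dims.size()];
for (int i = 0; i < out.length; i++) {
out[i] = dims.get(i); // element i of the inlined vector
}
return out;
}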
@Name("google::protobuf::Map") public static class StringAttrValueMap extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public StringAttrValueMap(Pointer p) { super(p); }
public StringAttrValueMap() { allocate(); }
private native void allocate();
public native @Name("operator=") @ByRef StringAttrValueMap put(@ByRef StringAttrValueMap x);
public native long size();
@Index public native @ByRef AttrValue get(@StdString BytePointer i);
public native StringAttrValueMap put(@StdString BytePointer i, AttrValue value);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@NoOffset @Name("iterator") public static class Iterator extends Pointer {
public Iterator(Pointer p) { super(p); }
public Iterator() { }
public native @Name("operator++") @ByRef Iterator increment();
public native @Name("operator==") boolean equals(@ByRef Iterator it);
public native @Name("operator*().first") @MemberGetter @StdString BytePointer first();
public native @Name("operator*().second") @MemberGetter @ByRef AttrValue second();
}
}
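// Illustrative sketch, not part of the generated bindings: walks a
// StringAttrValueMap with the begin()/end()/increment() iterator declared
// above and prints each attribute name; the mapped AttrValue is only fetched.
private static void exampleListAttrNames(StringAttrValueMap attrs) {
for (StringAttrValueMap.Iterator it = attrs.begin(); !it.equals(attrs.end()); it.increment()) {
System.out.println(it.first().getString()); // key of the current entry
AttrValue value = it.second(); // mapped value, unused here
}
}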
@Name("std::vector") public static class StringVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public StringVector(Pointer p) { super(p); }
public StringVector(BytePointer ... array) { this(array.length); put(array); }
public StringVector(String ... array) { this(array.length); put(array); }
public StringVector() { allocate(); }
public StringVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef StringVector put(@ByRef StringVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native @StdString BytePointer get(@Cast("size_t") long i);
public native StringVector put(@Cast("size_t") long i, BytePointer value);
@ValueSetter @Index public native StringVector put(@Cast("size_t") long i, @StdString String value);
public StringVector put(BytePointer ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
public StringVector put(String ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
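// Illustrative sketch, not part of the generated bindings: builds a
// std::vector of strings from Java varargs with the helper constructor above
// and reads the elements back; the element values are made up for the example.
private static void exampleStringVector() {
StringVector names = new StringVector("input", "output"); // allocates and fills
for (long i = 0; i < names.size(); i++) {
BytePointer name = names.get(i); // element i as a BytePointer
System.out.println(name.getString());
}
}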
@Name("std::vector") public static class TensorVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorVector(Pointer p) { super(p); }
public TensorVector(Tensor ... array) { this(array.length); put(array); }
public TensorVector() { allocate(); }
public TensorVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef TensorVector put(@ByRef TensorVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native @ByRef Tensor get(@Cast("size_t") long i);
public native TensorVector put(@Cast("size_t") long i, Tensor value);
public TensorVector put(Tensor ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
@Name("std::vector") public static class TensorProtoVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorProtoVector(Pointer p) { super(p); }
public TensorProtoVector(TensorProto ... array) { this(array.length); put(array); }
public TensorProtoVector() { allocate(); }
public TensorProtoVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef TensorProtoVector put(@ByRef TensorProtoVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native @ByRef TensorProto get(@Cast("size_t") long i);
public native TensorProtoVector put(@Cast("size_t") long i, TensorProto value);
public TensorProtoVector put(TensorProto ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
@Name("std::vector") public static class TensorShapeVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShapeVector(Pointer p) { super(p); }
public TensorShapeVector(TensorShape ... array) { this(array.length); put(array); }
public TensorShapeVector() { allocate(); }
public TensorShapeVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef TensorShapeVector put(@ByRef TensorShapeVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native @ByRef TensorShape get(@Cast("size_t") long i);
public native TensorShapeVector put(@Cast("size_t") long i, TensorShape value);
public TensorShapeVector put(TensorShape ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
@Name("std::vector") public static class NodeOutVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NodeOutVector(Pointer p) { super(p); }
public NodeOutVector(NodeBuilder.NodeOut ... array) { this(array.length); put(array); }
public NodeOutVector() { allocate(); }
public NodeOutVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef NodeOutVector put(@ByRef NodeOutVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native @ByRef NodeBuilder.NodeOut get(@Cast("size_t") long i);
public native NodeOutVector put(@Cast("size_t") long i, NodeBuilder.NodeOut value);
public NodeOutVector put(NodeBuilder.NodeOut ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
@Name("std::vector") public static class NodeVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NodeVector(Pointer p) { super(p); }
public NodeVector(Node ... array) { this(array.length); put(array); }
public NodeVector() { allocate(); }
public NodeVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef NodeVector put(@ByRef NodeVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native Node get(@Cast("size_t") long i);
public native NodeVector put(@Cast("size_t") long i, Node value);
public NodeVector put(Node ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
@Name("std::vector >") public static class StringTensorPairVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public StringTensorPairVector(Pointer p) { super(p); }
public StringTensorPairVector(BytePointer[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
public StringTensorPairVector(String[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); }
public StringTensorPairVector() { allocate(); }
public StringTensorPairVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef StringTensorPairVector put(@ByRef StringTensorPairVector x);
public native long size();
public native void resize(@Cast("size_t") long n);
@Index public native @StdString BytePointer first(@Cast("size_t") long i); public native StringTensorPairVector first(@Cast("size_t") long i, BytePointer first);
@Index public native @ByRef Tensor second(@Cast("size_t") long i); public native StringTensorPairVector second(@Cast("size_t") long i, Tensor second);
@MemberSetter @Index public native StringTensorPairVector first(@Cast("size_t") long i, @StdString String first);
public StringTensorPairVector put(BytePointer[] firstValue, Tensor[] secondValue) {
for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
first(i, firstValue[i]);
second(i, secondValue[i]);
}
return this;
}
public StringTensorPairVector put(String[] firstValue, Tensor[] secondValue) {
for (int i = 0; i < firstValue.length && i < secondValue.length; i++) {
first(i, firstValue[i]);
second(i, secondValue[i]);
}
return this;
}
}
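// Illustrative sketch, not part of the generated bindings: pairs tensor names
// with Tensor values, the shape typically used for feed-style arguments. The
// tensor name "input:0" is made up for the example.
private static StringTensorPairVector exampleFeed(Tensor inputTensor) {
return new StringTensorPairVector(new String[] {"input:0"},
new Tensor[] {inputTensor});
}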
@NoOffset @Name("std::pair") public static class EdgeSetBoolPair extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public EdgeSetBoolPair(Pointer p) { super(p); }
public EdgeSetBoolPair(EdgeSetIterator firstValue, boolean secondValue) { this(); put(firstValue, secondValue); }
public EdgeSetBoolPair() { allocate(); }
private native void allocate();
public native @Name("operator=") @ByRef EdgeSetBoolPair put(@ByRef EdgeSetBoolPair x);
@MemberGetter public native @ByRef EdgeSetIterator first(); public native EdgeSetBoolPair first(EdgeSetIterator first);
@MemberGetter public native @Cast("bool") boolean second(); public native EdgeSetBoolPair second(boolean second);
public EdgeSetBoolPair put(EdgeSetIterator firstValue, boolean secondValue) {
first(firstValue);
second(secondValue);
return this;
}
}
// Parsed from tensorflow/core/platform/default/integral_types.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
// #define TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
// IWYU pragma: private, include "third_party/tensorflow/core/platform/types.h"
// IWYU pragma: friend third_party/tensorflow/core/platform/types.h
// namespace tensorflow
// #endif // TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
// Parsed from tensorflow/core/framework/numeric_types.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_FRAMEWORK_NUMERIC_TYPES_H_
// #define TENSORFLOW_FRAMEWORK_NUMERIC_TYPES_H_
// #include
// #include "tensorflow/core/platform/types.h"
// Single precision complex.
// Double precision complex.
// end namespace tensorflow
// #endif // TENSORFLOW_FRAMEWORK_NUMERIC_TYPES_H_
// Parsed from tensorflow/core/platform/init_main.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_INIT_MAIN_H_
// #define TENSORFLOW_PLATFORM_INIT_MAIN_H_
// Platform-specific initialization routine that may be invoked by a
// main() program that uses TensorFlow.
//
// Default implementation does nothing.
@Namespace("tensorflow::port") public static native void InitMain(@Cast("const char*") BytePointer usage, IntPointer argc, @Cast("char***") PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(String usage, IntBuffer argc, @Cast("char***") PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(@Cast("const char*") BytePointer usage, int[] argc, @Cast("char***") PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(String usage, IntPointer argc, @Cast("char***") PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(@Cast("const char*") BytePointer usage, IntBuffer argc, @Cast("char***") PointerPointer argv);
@Namespace("tensorflow::port") public static native void InitMain(String usage, int[] argc, @Cast("char***") PointerPointer argv);
// namespace port
// namespace tensorflow
// #endif // TENSORFLOW_PLATFORM_INIT_MAIN_H_
// Parsed from tensorflow/core/platform/types.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_TYPES_H_
// #define TENSORFLOW_PLATFORM_TYPES_H_
// #include
// #include "tensorflow/core/platform/platform.h"
// Include appropriate platform-dependent implementations
// #if defined(PLATFORM_GOOGLE)
// #include "tensorflow/core/platform/google/integral_types.h"
// #elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) ||
// defined(PLATFORM_GOOGLE_ANDROID)
// #include "tensorflow/core/platform/default/integral_types.h"
// #else
// #error Define the appropriate PLATFORM_ macro for this platform
// #endif
// Define tensorflow::string to refer to appropriate platform specific type.
// TODO(josh11b): Move this into the platform/*/integral_types.h files
// above, and rename them platform/*/types.h.
// #if defined(PLATFORM_GOOGLE)
// #else
// #endif
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::uint8") byte kuint8max();
public static final byte kuint8max = kuint8max();
@Namespace("tensorflow") @MemberGetter public static native short kuint16max();
public static final short kuint16max = kuint16max();
@Namespace("tensorflow") @MemberGetter public static native int kuint32max();
public static final int kuint32max = kuint32max();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::uint64") long kuint64max();
public static final long kuint64max = kuint64max();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int8") byte kint8min();
public static final byte kint8min = kint8min();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int8") byte kint8max();
public static final byte kint8max = kint8max();
@Namespace("tensorflow") @MemberGetter public static native short kint16min();
public static final short kint16min = kint16min();
@Namespace("tensorflow") @MemberGetter public static native short kint16max();
public static final short kint16max = kint16max();
@Namespace("tensorflow") @MemberGetter public static native int kint32min();
public static final int kint32min = kint32min();
@Namespace("tensorflow") @MemberGetter public static native int kint32max();
public static final int kint32max = kint32max();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int64") long kint64min();
public static final long kint64min = kint64min();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::int64") long kint64max();
public static final long kint64max = kint64max();
// A typedef for a uint64 used as a short fingerprint.
// namespace tensorflow
// #endif // TENSORFLOW_PLATFORM_TYPES_H_
// Parsed from tensorflow/core/platform/mutex.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_MUTEX_H_
// #define TENSORFLOW_PLATFORM_MUTEX_H_
// #include "tensorflow/core/platform/platform.h"
// #include "tensorflow/core/platform/types.h"
/** enum tensorflow::ConditionResult */
public static final int kCond_Timeout = 0, kCond_MaybeNotified = 1;
// namespace tensorflow
// Include appropriate platform-dependent implementations of mutex etc.
// #if defined(PLATFORM_GOOGLE)
// #include "tensorflow/core/platform/google/mutex.h"
// #elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) ||
// defined(PLATFORM_GOOGLE_ANDROID)
// #include "tensorflow/core/platform/default/mutex.h"
// #else
// #error Define the appropriate PLATFORM_ macro for this platform
// #endif
// The mutex library included above defines:
// class mutex;
// class mutex_lock;
// class condition_variable;
// It also defines the following:
// Like "cv->wait(*mu)", except that it only waits for up to "ms" milliseconds.
//
// Returns kCond_Timeout if the timeout expired without this
// thread noticing a signal on the condition variable. Otherwise may
// return either kCond_Timeout or kCond_MaybeNotified
@Namespace("tensorflow") public static native @Cast("tensorflow::ConditionResult") int WaitForMilliseconds(@Cast("tensorflow::mutex_lock*") Pointer mu, @Cast("tensorflow::condition_variable*") Pointer cv,
@Cast("tensorflow::int64") long ms);
// namespace tensorflow
// #endif // TENSORFLOW_PLATFORM_MUTEX_H_
// Parsed from tensorflow/core/platform/macros.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_MACROS_H_
// #define TENSORFLOW_PLATFORM_MACROS_H_
// Compiler attributes
// #if (defined(__GNUC__) || defined(__APPLE__)) && !defined(SWIG)
// Compiler supports GCC-style attributes
// #define TF_ATTRIBUTE_NORETURN __attribute__((noreturn))
// #define TF_ATTRIBUTE_NOINLINE __attribute__((noinline))
// #define TF_ATTRIBUTE_UNUSED __attribute__((unused))
// #define TF_ATTRIBUTE_COLD __attribute__((cold))
// #define TF_PACKED __attribute__((packed))
// #define TF_MUST_USE_RESULT __attribute__((warn_unused_result))
// #define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
// __attribute__((__format__(__printf__, string_index, first_to_check)))
// #define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
// __attribute__((__format__(__scanf__, string_index, first_to_check)))
// #else
// Non-GCC equivalents
// #define TF_ATTRIBUTE_NORETURN
// #define TF_ATTRIBUTE_NOINLINE
// #define TF_ATTRIBUTE_UNUSED
// #define TF_ATTRIBUTE_COLD
// #define TF_MUST_USE_RESULT
// #define TF_PACKED
// #define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
// #define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
// #endif
// GCC can be told that a certain branch is not likely to be taken (for
// instance, a CHECK failure), and use that information in static analysis.
// Giving it this information can help it optimize for the common case in
// the absence of better information (ie. -fprofile-arcs).
// #if defined(COMPILER_GCC3)
// #define TF_PREDICT_FALSE(x) (__builtin_expect(x, 0))
// #define TF_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
// #else
// #define TF_PREDICT_FALSE(x) (x)
// #define TF_PREDICT_TRUE(x) (x)
// #endif
// A macro to disallow the copy constructor and operator= functions
// This is usually placed in the private: declarations for a class.
// #define TF_DISALLOW_COPY_AND_ASSIGN(TypeName)
// TypeName(const TypeName&) = delete;
// void operator=(const TypeName&) = delete
// The TF_ARRAYSIZE(arr) macro returns the # of elements in an array arr.
//
// The expression TF_ARRAYSIZE(a) is a compile-time constant of type
// size_t.
// #define TF_ARRAYSIZE(a)
// ((sizeof(a) / sizeof(*(a))) /
// static_cast(!(sizeof(a) % sizeof(*(a)))))
// #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
// Define this to 1 if the code is compiled in C++11 mode; leave it
// undefined otherwise. Do NOT define it to 0 -- that causes
// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'.
public static final int LANG_CXX11 = 1;
// #endif
// #if defined(__clang__) && defined(LANG_CXX11) && defined(__has_warning)
// #if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
// #define TF_FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
// #endif
// #endif
// #ifndef TF_FALLTHROUGH_INTENDED
// #define TF_FALLTHROUGH_INTENDED
// do {
// } while (0)
// #endif
// #endif // TENSORFLOW_PLATFORM_MACROS_H_
// Parsed from tensorflow/core/util/port.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_UTIL_PORT_H_
// #define TENSORFLOW_UTIL_PORT_H_
// Returns true if GOOGLE_CUDA is defined.
@Namespace("tensorflow") public static native @Cast("bool") boolean IsGoogleCudaEnabled();
// end namespace tensorflow
// #endif // TENSORFLOW_UTIL_PORT_H_
// Parsed from tensorflow/core/lib/core/error_codes.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/lib/core/error_codes.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow::error") public static native void protobuf_AddDesc_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto();
@Namespace("tensorflow::error") public static native void protobuf_AssignDesc_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto();
@Namespace("tensorflow::error") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto();
/** enum tensorflow::error::Code */
public static final int
OK = 0,
CANCELLED = 1,
UNKNOWN = 2,
INVALID_ARGUMENT = 3,
DEADLINE_EXCEEDED = 4,
NOT_FOUND = 5,
ALREADY_EXISTS = 6,
PERMISSION_DENIED = 7,
UNAUTHENTICATED = 16,
RESOURCE_EXHAUSTED = 8,
FAILED_PRECONDITION = 9,
ABORTED = 10,
OUT_OF_RANGE = 11,
UNIMPLEMENTED = 12,
INTERNAL = 13,
UNAVAILABLE = 14,
DATA_LOSS = 15,
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = 20,
Code_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
Code_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_IsValid(int value);
@Namespace("tensorflow::error") @MemberGetter public static native @Cast("const tensorflow::error::Code") int Code_MIN();
@Namespace("tensorflow::error") @MemberGetter public static native @Cast("const tensorflow::error::Code") int Code_MAX();
@Namespace("tensorflow::error") @MemberGetter public static native int Code_ARRAYSIZE();
@Namespace("tensorflow::error") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer Code_descriptor();
@Namespace("tensorflow::error") public static native @StdString BytePointer Code_Name(@Cast("tensorflow::error::Code") int value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
@StdString BytePointer name, @Cast("tensorflow::error::Code*") IntPointer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
@StdString String name, @Cast("tensorflow::error::Code*") IntBuffer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
@StdString BytePointer name, @Cast("tensorflow::error::Code*") int... value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
@StdString String name, @Cast("tensorflow::error::Code*") IntPointer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
@StdString BytePointer name, @Cast("tensorflow::error::Code*") IntBuffer value);
@Namespace("tensorflow::error") public static native @Cast("bool") boolean Code_Parse(
@StdString String name, @Cast("tensorflow::error::Code*") int... value);
// ===================================================================
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
// namespace error
// namespace tensorflow
// #ifndef SWIG
// #endif // SWIG
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2flib_2fcore_2ferror_5fcodes_2eproto__INCLUDED
// Parsed from tensorflow/core/platform/logging.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_LOGGING_H_
// #define TENSORFLOW_PLATFORM_LOGGING_H_
// #include "tensorflow/core/platform/platform.h" // To pick up PLATFORM_define
// #if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID)
// #include "tensorflow/core/platform/google/build_config/logging.h"
// #else
// #include "tensorflow/core/platform/default/logging.h"
// #endif
// Some platforms require that filenames be of a certain form when
// used for logging. This function is invoked to allow platforms to
// adjust the filename used for logging appropriately, if necessary
// (most ports can just do nothing). If any changes are necessary, the
// implementation should mutate "*filename" appropriately.
@Namespace("tensorflow::port") public static native void AdjustFilenameForLogging(@StdString @Cast({"char*", "std::string*"}) BytePointer filename);
// namespace port
// namespace tensorflow
// #endif // TENSORFLOW_PLATFORM_LOGGING_H_
// Parsed from tensorflow/core/lib/core/status.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_CORE_LIB_CORE_STATUS_H_
// #define TENSORFLOW_CORE_LIB_CORE_STATUS_H_
// #include
// #include
// #include
// #include "tensorflow/core/lib/core/error_codes.pb.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/logging.h"
@Namespace("tensorflow") @NoOffset public static class Status extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Status(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public Status(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public Status position(long position) {
return (Status)super.position(position);
}
/** Create a success status. */
public Status() { super((Pointer)null); allocate(); }
private native void allocate();
/** \brief Create a status with the specified error code and msg as a
* human-readable string containing more detailed information. */
public Status(@Cast("tensorflow::error::Code") int code, @StringPiece BytePointer msg) { super((Pointer)null); allocate(code, msg); }
private native void allocate(@Cast("tensorflow::error::Code") int code, @StringPiece BytePointer msg);
public Status(@Cast("tensorflow::error::Code") int code, @StringPiece String msg) { super((Pointer)null); allocate(code, msg); }
private native void allocate(@Cast("tensorflow::error::Code") int code, @StringPiece String msg);
/** Copy the specified status. */
public Status(@Const @ByRef Status s) { super((Pointer)null); allocate(s); }
private native void allocate(@Const @ByRef Status s);
public native @Name("operator =") void put(@Const @ByRef Status s);
public static native @ByVal Status OK();
/** Returns true iff the status indicates success. */
public native @Cast("bool") boolean ok();
public native @Cast("tensorflow::error::Code") int code();
public native @StdString BytePointer error_message();
public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Status x);
///
public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Status x);
/** \brief If {@code ok()}, stores {@code new_status} into {@code *this}. If {@code !ok()},
* preserves the current status, but may augment with additional
* information about {@code new_status}.
*
* Convenient way of keeping track of the first error encountered.
* Instead of:
* {@code if (overall_status.ok()) overall_status = new_status}
* Use:
* {@code overall_status.Update(new_status);} */
public native void Update(@Const @ByRef Status new_status);
/** \brief Return a string representation of this status suitable for
* printing. Returns the string {@code "OK"} for success. */
public native @StdString BytePointer ToString();
}
@Namespace("tensorflow") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef Status x);
public static native void TF_CHECK_OK(@ByVal Status val);
public static native void TF_QCHECK_OK(@ByVal Status val);
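// Illustrative sketch, not part of the generated bindings: the Update() idiom
// from the Status documentation above, keeping the first error while deleting
// several files, with the combined result checked by TF_CHECK_OK (which fails
// hard on a non-OK status).
private static void exampleStatusUpdate(Env env, StringVector files) {
Status overall = new Status(); // default-constructed success status
for (long i = 0; i < files.size(); i++) {
overall.Update(env.DeleteFile(files.get(i))); // keeps the first failure seen
}
TF_CHECK_OK(overall);
}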
// namespace tensorflow
// #endif // TENSORFLOW_CORE_LIB_CORE_STATUS_H_
// Parsed from tensorflow/core/platform/protobuf.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PLATFORM_PROTOBUF_H_
// #define TENSORFLOW_PLATFORM_PROTOBUF_H_
// #include "tensorflow/core/platform/platform.h"
// #include "tensorflow/core/platform/types.h"
// Import whatever namespace protobuf comes from into the
// ::tensorflow::protobuf namespace.
//
// TensorFlow code should use the ::tensorflow::protobuf namespace to
// refer to all protobuf APIs.
// #if defined(PLATFORM_GOOGLE)
// #include "tensorflow/core/platform/google/build_config/protobuf.h"
// #elif defined(PLATFORM_GOOGLE_ANDROID)
// #include "tensorflow/core/platform/google/build_config/protobuf_android.h"
// #else
// #include "tensorflow/core/platform/default/protobuf.h"
// #endif
// Parses a protocol buffer contained in a string in the binary wire format.
// Returns true on success. Note: Unlike protobuf's builtin ParseFromString,
// this function has no size restrictions on the total size of the encoded
// protocol buffer.
@Namespace("tensorflow") public static native @Cast("bool") boolean ParseProtoUnlimited(@Cast("tensorflow::protobuf::Message*") Pointer proto, @StdString BytePointer serialized);
@Namespace("tensorflow") public static native @Cast("bool") boolean ParseProtoUnlimited(@Cast("tensorflow::protobuf::Message*") Pointer proto, @StdString String serialized);
@Namespace("tensorflow") public static native @Cast("bool") boolean ParseProtoUnlimited(@Cast("tensorflow::protobuf::Message*") Pointer proto, @Const Pointer serialized,
@Cast("size_t") long size);
// namespace tensorflow
// #endif // TENSORFLOW_PLATFORM_PROTOBUF_H_
// Parsed from tensorflow/core/platform/file_system.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_
// #define TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/protobuf.h"
// #include "tensorflow/core/platform/types.h"
/** A generic interface for accessing a file system. */
@Namespace("tensorflow") public static class FileSystem extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public FileSystem(Pointer p) { super(p); }
/** The following functions are the implementations used by the corresponding
* functions in the Env class. */
public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname,
@Cast("tensorflow::RandomAccessFile**") PointerPointer result);
public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname,
@ByPtrPtr RandomAccessFile result);
public native @ByVal Status NewRandomAccessFile(@StdString String fname,
@ByPtrPtr RandomAccessFile result);
public native @ByVal Status NewWritableFile(@StdString BytePointer fname,
@Cast("tensorflow::WritableFile**") PointerPointer result);
public native @ByVal Status NewWritableFile(@StdString BytePointer fname,
@ByPtrPtr WritableFile result);
public native @ByVal Status NewWritableFile(@StdString String fname,
@ByPtrPtr WritableFile result);
public native @ByVal Status NewAppendableFile(@StdString BytePointer fname,
@Cast("tensorflow::WritableFile**") PointerPointer result);
public native @ByVal Status NewAppendableFile(@StdString BytePointer fname,
@ByPtrPtr WritableFile result);
public native @ByVal Status NewAppendableFile(@StdString String fname,
@ByPtrPtr WritableFile result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
@StdString BytePointer fname, @Cast("tensorflow::ReadOnlyMemoryRegion**") PointerPointer result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
@StdString BytePointer fname, @ByPtrPtr ReadOnlyMemoryRegion result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
@StdString String fname, @ByPtrPtr ReadOnlyMemoryRegion result);
public native @Cast("bool") boolean FileExists(@StdString BytePointer fname);
public native @Cast("bool") boolean FileExists(@StdString String fname);
public native @ByVal Status GetChildren(@StdString BytePointer dir,
StringVector result);
public native @ByVal Status GetChildren(@StdString String dir,
StringVector result);
public native @ByVal Status DeleteFile(@StdString BytePointer fname);
public native @ByVal Status DeleteFile(@StdString String fname);
public native @ByVal Status CreateDir(@StdString BytePointer dirname);
public native @ByVal Status CreateDir(@StdString String dirname);
public native @ByVal Status DeleteDir(@StdString BytePointer dirname);
public native @ByVal Status DeleteDir(@StdString String dirname);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongPointer file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") long... file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongPointer file_size);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") long... file_size);
public native @ByVal Status RenameFile(@StdString BytePointer src, @StdString BytePointer target);
public native @ByVal Status RenameFile(@StdString String src, @StdString String target);
// Translate a URI to a filename usable by the FileSystem implementation. The
// implementation in this class returns the name as-is.
public native @StdString BytePointer TranslateName(@StdString BytePointer name);
public native @StdString String TranslateName(@StdString String name);
}
// Degenerate file system that provides no implementations.
@Namespace("tensorflow") public static class NullFileSystem extends FileSystem {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NullFileSystem(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public NullFileSystem(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public NullFileSystem position(long position) {
return (NullFileSystem)super.position(position);
}
public NullFileSystem() { super((Pointer)null); allocate(); }
private native void allocate();
public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname,
@Cast("tensorflow::RandomAccessFile**") PointerPointer result);
public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname,
@ByPtrPtr RandomAccessFile result);
public native @ByVal Status NewRandomAccessFile(@StdString String fname,
@ByPtrPtr RandomAccessFile result);
public native @ByVal Status NewWritableFile(@StdString BytePointer fname, @Cast("tensorflow::WritableFile**") PointerPointer result);
public native @ByVal Status NewWritableFile(@StdString BytePointer fname, @ByPtrPtr WritableFile result);
public native @ByVal Status NewWritableFile(@StdString String fname, @ByPtrPtr WritableFile result);
public native @ByVal Status NewAppendableFile(@StdString BytePointer fname,
@Cast("tensorflow::WritableFile**") PointerPointer result);
public native @ByVal Status NewAppendableFile(@StdString BytePointer fname,
@ByPtrPtr WritableFile result);
public native @ByVal Status NewAppendableFile(@StdString String fname,
@ByPtrPtr WritableFile result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
@StdString BytePointer fname, @Cast("tensorflow::ReadOnlyMemoryRegion**") PointerPointer result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
@StdString BytePointer fname, @ByPtrPtr ReadOnlyMemoryRegion result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(
@StdString String fname, @ByPtrPtr ReadOnlyMemoryRegion result);
public native @Cast("bool") boolean FileExists(@StdString BytePointer fname);
public native @Cast("bool") boolean FileExists(@StdString String fname);
public native @ByVal Status GetChildren(@StdString BytePointer dir, StringVector result);
public native @ByVal Status GetChildren(@StdString String dir, StringVector result);
public native @ByVal Status DeleteFile(@StdString BytePointer fname);
public native @ByVal Status DeleteFile(@StdString String fname);
public native @ByVal Status CreateDir(@StdString BytePointer dirname);
public native @ByVal Status CreateDir(@StdString String dirname);
public native @ByVal Status DeleteDir(@StdString BytePointer dirname);
public native @ByVal Status DeleteDir(@StdString String dirname);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongPointer file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") long... file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongPointer file_size);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") long... file_size);
public native @ByVal Status RenameFile(@StdString BytePointer src, @StdString BytePointer target);
public native @ByVal Status RenameFile(@StdString String src, @StdString String target);
}
/** A file abstraction for randomly reading the contents of a file. */
@Namespace("tensorflow") public static class RandomAccessFile extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public RandomAccessFile(Pointer p) { super(p); }
/** \brief Reads up to {@code n} bytes from the file starting at {@code offset}.
*
* {@code scratch[0..n-1]} may be written by this routine. Sets {@code *result}
* to the data that was read (including if fewer than {@code n} bytes were
* successfully read). May set {@code *result} to point at data in
* {@code scratch[0..n-1]}, so {@code scratch[0..n-1]} must be live when
* {@code *result} is used.
*
* On OK returned status: {@code n} bytes have been stored in {@code *result}.
* On non-OK returned status: {@code [0..n]} bytes have been stored in {@code *result}.
*
* Returns {@code OUT_OF_RANGE} if fewer than n bytes were stored in {@code *result}
* because of EOF.
*
* Safe for concurrent use by multiple threads. */
public native @ByVal Status Read(@Cast("tensorflow::uint64") long offset, @Cast("size_t") long n, @StringPiece BytePointer result,
@Cast("char*") BytePointer scratch);
public native @ByVal Status Read(@Cast("tensorflow::uint64") long offset, @Cast("size_t") long n, @StringPiece BytePointer result,
@Cast("char*") ByteBuffer scratch);
public native @ByVal Status Read(@Cast("tensorflow::uint64") long offset, @Cast("size_t") long n, @StringPiece BytePointer result,
@Cast("char*") byte[] scratch);
}
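// Illustrative sketch, not part of the generated bindings: reads n bytes from
// a RandomAccessFile into a caller-owned scratch buffer per the contract
// documented above; result may end up pointing into scratch, and a short read
// at EOF yields an OUT_OF_RANGE status. How result is consumed is left out.
private static Status exampleRead(RandomAccessFile file, long offset, long n) {
byte[] scratch = new byte[(int)n]; // backing storage owned by the caller
BytePointer result = new BytePointer((Pointer)null); // set to the bytes actually read
return file.Read(offset, n, result, scratch);
}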
/** \brief A file abstraction for sequential writing.
*
* The implementation must provide buffering since callers may append
* small fragments at a time to the file. */
@Namespace("tensorflow") public static class WritableFile extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public WritableFile(Pointer p) { super(p); }
public native @ByVal Status Append(@StringPiece BytePointer data);
public native @ByVal Status Append(@StringPiece String data);
public native @ByVal Status Close();
public native @ByVal Status Flush();
public native @ByVal Status Sync();
}
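// Illustrative sketch, not part of the generated bindings: appends a line to a
// WritableFile, flushes the buffered bytes, and closes it, checking each
// Status along the way.
private static Status exampleAppend(WritableFile file) {
Status s = file.Append("hello\n"); // String overload declared above
if (s.ok()) s = file.Flush(); // push buffered data out
if (s.ok()) s = file.Close(); // release the underlying handle
return s;
}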
/** \brief A readonly memmapped file abstraction.
*
 * The implementation must guarantee that all memory is accessible when the
* object exists, independently from the Env that created it. */
@Namespace("tensorflow") public static class ReadOnlyMemoryRegion extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ReadOnlyMemoryRegion(Pointer p) { super(p); }
public native @Const Pointer data();
public native @Cast("tensorflow::uint64") long length();
}
/** \brief A registry for file system implementations.
*
 * Filenames are specified as a URI, which is of the form
 * [scheme://]<filename>.
* File system implementations are registered using the REGISTER_FILE_SYSTEM
* macro, providing the 'scheme' as the key. */
@Namespace("tensorflow") public static class FileSystemRegistry extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public FileSystemRegistry(Pointer p) { super(p); }
public native void Register(@StdString BytePointer scheme, @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Fn factory);
public native void Register(@StdString String scheme, @ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Fn factory);
public native FileSystem Lookup(@StdString BytePointer scheme);
public native FileSystem Lookup(@StdString String scheme);
public native @ByVal Status GetRegisteredFileSystemSchemes(
StringVector schemes);
}
// Given a URI of the form [scheme://]<filename>, return 'scheme'.
@Namespace("tensorflow") public static native @StdString BytePointer GetSchemeFromURI(@StdString BytePointer name);
@Namespace("tensorflow") public static native @StdString String GetSchemeFromURI(@StdString String name);
// Given a URI of the form [scheme://]<filename>, return 'filename'.
@Namespace("tensorflow") public static native @StdString BytePointer GetNameFromURI(@StdString BytePointer name);
@Namespace("tensorflow") public static native @StdString String GetNameFromURI(@StdString String name);
// namespace tensorflow
// #endif // TENSORFLOW_CORE_PLATFORM_FILE_SYSTEM_H_
// Parsed from tensorflow/core/platform/env.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_CORE_PLATFORM_ENV_H_
// #define TENSORFLOW_CORE_PLATFORM_ENV_H_
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/platform/file_system.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/protobuf.h"
// #include "tensorflow/core/platform/types.h"
/** \brief An interface used by the tensorflow implementation to
* access operating system functionality like the filesystem etc.
*
* Callers may wish to provide a custom Env object to get fine grain
* control.
*
* All Env implementations are safe for concurrent access from
* multiple threads without any external synchronization. */
@Namespace("tensorflow") @NoOffset public static class Env extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Env(Pointer p) { super(p); }
/** \brief Returns a default environment suitable for the current operating
* system.
*
* Sophisticated users may wish to provide their own Env
* implementation instead of relying on this default environment.
*
* The result of Default() belongs to this library and must never be deleted. */
public static native Env Default();
/** \brief Returns the FileSystem object to handle operations on the file
 * specified by 'fname'. The FileSystem object is used as the implementation
 * for the file system related (non-virtual) functions that follow.
 * The returned FileSystem object is still owned by the Env object and will
 * (might) be destroyed when the environment is destroyed. */
public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname, @Cast("tensorflow::FileSystem**") PointerPointer result);
public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname, @ByPtrPtr FileSystem result);
public native @ByVal Status GetFileSystemForFile(@StdString String fname, @ByPtrPtr FileSystem result);
/** \brief Returns the file system schemes registered for this Env. */
public native @ByVal Status GetRegisteredFileSystemSchemes(StringVector schemes);
// \brief Register a file system for a scheme.
///
///
public native void RegisterFileSystem(@StdString BytePointer scheme,
@ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Fn factory);
public native void RegisterFileSystem(@StdString String scheme,
@ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Fn factory);
/** \brief Creates a brand new random access read-only file with the
* specified name.
* On success, stores a pointer to the new file in
* *result and returns OK. On failure stores NULL in *result and
* returns non-OK. If the file does not exist, returns a non-OK
* status.
*
* The returned file may be concurrently accessed by multiple threads.
*
* The ownership of the returned RandomAccessFile is passed to the caller
 * and the object should be deleted when it is no longer used. The file object
* shouldn't live longer than the Env object. */
///
///
///
public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname, @Cast("tensorflow::RandomAccessFile**") PointerPointer result);
public native @ByVal Status NewRandomAccessFile(@StdString BytePointer fname, @ByPtrPtr RandomAccessFile result);
public native @ByVal Status NewRandomAccessFile(@StdString String fname, @ByPtrPtr RandomAccessFile result);
/** \brief Creates an object that writes to a new file with the specified
* name.
*
* Deletes any existing file with the same name and creates a
* new file. On success, stores a pointer to the new file in
* *result and returns OK. On failure stores NULL in *result and
* returns non-OK.
*
* The returned file will only be accessed by one thread at a time.
*
* The ownership of the returned WritableFile is passed to the caller
 * and the object should be deleted when it is no longer used. The file object
* shouldn't live longer than the Env object. */
///
///
///
public native @ByVal Status NewWritableFile(@StdString BytePointer fname, @Cast("tensorflow::WritableFile**") PointerPointer result);
public native @ByVal Status NewWritableFile(@StdString BytePointer fname, @ByPtrPtr WritableFile result);
public native @ByVal Status NewWritableFile(@StdString String fname, @ByPtrPtr WritableFile result);
/** \brief Creates an object that either appends to an existing file, or
* writes to a new file (if the file does not exist to begin with).
*
* On success, stores a pointer to the new file in *result and
* returns OK. On failure stores NULL in *result and returns
* non-OK.
*
* The returned file will only be accessed by one thread at a time.
*
* The ownership of the returned WritableFile is passed to the caller
 * and the object should be deleted when it is no longer used. The file object
* shouldn't live longer than the Env object. */
///
///
///
public native @ByVal Status NewAppendableFile(@StdString BytePointer fname, @Cast("tensorflow::WritableFile**") PointerPointer result);
public native @ByVal Status NewAppendableFile(@StdString BytePointer fname, @ByPtrPtr WritableFile result);
public native @ByVal Status NewAppendableFile(@StdString String fname, @ByPtrPtr WritableFile result);
/** \brief Creates a readonly region of memory with the file context.
*
* On success, it returns a pointer to read-only memory region
* from the content of file fname. The ownership of the region is passed to
* the caller. On failure stores nullptr in *result and returns non-OK.
*
* The returned memory region can be accessed from many threads in parallel.
*
* The ownership of the returned ReadOnlyMemoryRegion is passed to the caller
 * and the object should be deleted when it is no longer used. The memory region
* object shouldn't live longer than the Env object. */
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(@StdString BytePointer fname,
@Cast("tensorflow::ReadOnlyMemoryRegion**") PointerPointer result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(@StdString BytePointer fname,
@ByPtrPtr ReadOnlyMemoryRegion result);
public native @ByVal Status NewReadOnlyMemoryRegionFromFile(@StdString String fname,
@ByPtrPtr ReadOnlyMemoryRegion result);
/** Returns true iff the named file exists. */
///
public native @Cast("bool") boolean FileExists(@StdString BytePointer fname);
public native @Cast("bool") boolean FileExists(@StdString String fname);
/** \brief Stores in *result the names of the children of the specified
 * directory. The names are relative to "dir".
 *
 * Original contents of *result are dropped. */
public native @ByVal Status GetChildren(@StdString BytePointer dir, StringVector result);
public native @ByVal Status GetChildren(@StdString String dir, StringVector result);
/** Deletes the named file. */
public native @ByVal Status DeleteFile(@StdString BytePointer fname);
public native @ByVal Status DeleteFile(@StdString String fname);
/** Creates the specified directory. */
public native @ByVal Status CreateDir(@StdString BytePointer dirname);
public native @ByVal Status CreateDir(@StdString String dirname);
/** Deletes the specified directory. */
public native @ByVal Status DeleteDir(@StdString BytePointer dirname);
public native @ByVal Status DeleteDir(@StdString String dirname);
/** Stores the size of {@code fname} in {@code *file_size}. */
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongPointer file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") long... file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") LongPointer file_size);
public native @ByVal Status GetFileSize(@StdString BytePointer fname, @Cast("tensorflow::uint64*") LongBuffer file_size);
public native @ByVal Status GetFileSize(@StdString String fname, @Cast("tensorflow::uint64*") long... file_size);
/** \brief Renames file src to target. If target already exists, it will be
* replaced. */
public native @ByVal Status RenameFile(@StdString BytePointer src, @StdString BytePointer target);
public native @ByVal Status RenameFile(@StdString String src, @StdString String target);
// TODO(jeff,sanjay): Add back thread/thread-pool support if needed.
// TODO(jeff,sanjay): if needed, tighten spec so relative to epoch, or
// provide a routine to get the absolute time.
/** \brief Returns the number of microseconds since some fixed point in
 * time. Only useful for computing deltas of time. */
public native @Cast("tensorflow::uint64") long NowMicros();
/** Sleeps/delays the thread for the prescribed number of microseconds. */
///
public native void SleepForMicroseconds(int micros);
/** \brief Returns a new thread that is running fn() and is identified
* (for debugging/performance-analysis) by "name".
*
* Caller takes ownership of the result and must delete it eventually
* (the deletion will block until fn() stops running). */
public native Thread StartThread(@Const @ByRef ThreadOptions thread_options,
@StdString BytePointer name,
@ByVal Fn fn);
public native Thread StartThread(@Const @ByRef ThreadOptions thread_options,
@StdString String name,
@ByVal Fn fn);
// \brief Schedules the given closure on a thread-pool.
//
// NOTE(mrry): This closure may block.
public native void SchedClosure(@ByVal Fn closure);
// \brief Schedules the given closure on a thread-pool after the given number
// of microseconds.
//
// NOTE(mrry): This closure must not block.
public native void SchedClosureAfter(int micros, @ByVal Fn closure);
// \brief Load a dynamic library.
//
// Pass "library_filename" to a platform-specific mechanism for dynamically
// loading a library. The rules for determining the exact location of the
// library are platform-specific and are not documented here.
//
// On success, stores a handle to the library in "*handle" and returns OK
// from the function.
// Otherwise, stores nullptr in "*handle" and returns an error status from
// the function.
public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") PointerPointer handle);
public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
public native @ByVal Status LoadLibrary(String library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
// \brief Get a pointer to a symbol from a dynamic library.
//
// "handle" should be a pointer returned from a previous call to LoadLibrary.
// On success, stores a pointer to the located symbol in "*symbol" and returns
// OK from the function. Otherwise, stores nullptr in "*symbol" and returns an
// error status from the function.
public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
@Cast("void**") PointerPointer symbol);
public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
@Cast("void**") @ByPtrPtr Pointer symbol);
public native @ByVal Status GetSymbolFromLibrary(Pointer handle, String symbol_name,
@Cast("void**") @ByPtrPtr Pointer symbol);
}
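// The sketch below is not part of the generated bindings; it is a hedged usage
// example for the Env file-system methods declared above. The Env instance is
// supplied by the caller, and the "/tmp" paths and file names are purely
// hypothetical.
static void exampleEnvFileOperations(Env env) {
    String dir = "/tmp/tf-example";            // hypothetical directory
    Status created = env.CreateDir(dir);       // inspect the returned Status for errors

    if (env.FileExists(dir + "/data.bin")) {   // hypothetical file name
        long[] size = new long[1];
        env.GetFileSize(dir + "/data.bin", size);
        env.RenameFile(dir + "/data.bin", dir + "/data.old");
    }

    StringVector children = new StringVector();
    env.GetChildren(dir, children);            // names stored relative to dir
    for (long i = 0; i < children.size(); i++) {
        String name = children.get(i).getString();
        // ... process each child name ...
    }

    long before = env.NowMicros();
    env.SleepForMicroseconds(1000);            // sleep roughly 1 ms
    long elapsedMicros = env.NowMicros() - before; // only meaningful as a delta
}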
/** \brief An implementation of Env that forwards all calls to another Env.
*
* May be useful to clients who wish to override just part of the
* functionality of another Env. */
@Namespace("tensorflow") @NoOffset public static class EnvWrapper extends Env {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public EnvWrapper(Pointer p) { super(p); }
/** Initializes an EnvWrapper that delegates all calls to *t */
public EnvWrapper(Env t) { super((Pointer)null); allocate(t); }
private native void allocate(Env t);
/** Returns the target to which this Env forwards all calls */
public native Env target();
public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname,
@Cast("tensorflow::FileSystem**") PointerPointer result);
public native @ByVal Status GetFileSystemForFile(@StdString BytePointer fname,
@ByPtrPtr FileSystem result);
public native @ByVal Status GetFileSystemForFile(@StdString String fname,
@ByPtrPtr FileSystem result);
public native @ByVal Status GetRegisteredFileSystemSchemes(StringVector schemes);
public native void RegisterFileSystem(@StdString BytePointer scheme,
@ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Fn factory);
public native void RegisterFileSystem(@StdString String scheme,
@ByVal @Cast("tensorflow::FileSystemRegistry::Factory*") Fn factory);
public native @Cast("tensorflow::uint64") long NowMicros();
public native void SleepForMicroseconds(int micros);
public native Thread StartThread(@Const @ByRef ThreadOptions thread_options, @StdString BytePointer name,
@ByVal Fn fn);
public native Thread StartThread(@Const @ByRef ThreadOptions thread_options, @StdString String name,
@ByVal Fn fn);
public native void SchedClosure(@ByVal Fn closure);
public native void SchedClosureAfter(int micros, @ByVal Fn closure);
public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") PointerPointer handle);
public native @ByVal Status LoadLibrary(@Cast("const char*") BytePointer library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
public native @ByVal Status LoadLibrary(String library_filename, @Cast("void**") @ByPtrPtr Pointer handle);
public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
@Cast("void**") PointerPointer symbol);
public native @ByVal Status GetSymbolFromLibrary(Pointer handle, @Cast("const char*") BytePointer symbol_name,
@Cast("void**") @ByPtrPtr Pointer symbol);
public native @ByVal Status GetSymbolFromLibrary(Pointer handle, String symbol_name,
@Cast("void**") @ByPtrPtr Pointer symbol);
}
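// A minimal sketch (not part of the generated file) showing EnvWrapper used as a
// pass-through delegate. Overriding individual operations would have to happen on
// the C++ side (or via a JavaCPP @Virtual mapping), which is outside this excerpt.
static void exampleEnvWrapper(Env baseEnv) {
    EnvWrapper wrapped = new EnvWrapper(baseEnv); // forwards every call to baseEnv
    Env target = wrapped.target();                // the Env all calls are delegated to
    long now = wrapped.NowMicros();               // forwarded to baseEnv.NowMicros()
}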
@Namespace("tensorflow") public static class Thread extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Thread(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public Thread(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public Thread position(long position) {
return (Thread)super.position(position);
}
public Thread() { super((Pointer)null); allocate(); }
private native void allocate();
/** Blocks until the thread of control stops running. */
}
/** \brief Options to configure a Thread.
*
* Note that the options are all hints, and the
* underlying implementation may choose to ignore it. */
@Namespace("tensorflow") public static class ThreadOptions extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public ThreadOptions() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public ThreadOptions(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ThreadOptions(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public ThreadOptions position(long position) {
return (ThreadOptions)super.position(position);
}
/** Thread stack size to use (in bytes). */
public native @Cast("size_t") long stack_size(); public native ThreadOptions stack_size(long stack_size); // 0: use system default value
/** Guard area size to use near thread stacks to use (in bytes) */
public native @Cast("size_t") long guard_size(); public native ThreadOptions guard_size(long guard_size); // 0: use system default value
}
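// A hedged sketch of configuring ThreadOptions. Both fields are only hints, a
// value of 0 keeps the system default, and the sizes below are illustrative.
static ThreadOptions exampleThreadOptions() {
    ThreadOptions options = new ThreadOptions();
    options.stack_size(2L * 1024 * 1024); // request a 2 MB thread stack
    options.guard_size(64 * 1024);        // request a 64 KB guard area
    return options;                       // pass to Env.StartThread(options, name, fn)
}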
/** A utility routine: reads contents of named file into {@code *data} */
@Namespace("tensorflow") public static native @ByVal Status ReadFileToString(Env env, @StdString BytePointer fname, @StdString @Cast({"char*", "std::string*"}) BytePointer data);
@Namespace("tensorflow") public static native @ByVal Status ReadFileToString(Env env, @StdString String fname, @StdString @Cast({"char*", "std::string*"}) BytePointer data);
/** A utility routine: write contents of {@code data} to file named {@code fname}
* (overwriting existing contents, if any). */
@Namespace("tensorflow") public static native @ByVal Status WriteStringToFile(Env env, @StdString BytePointer fname,
@StringPiece BytePointer data);
@Namespace("tensorflow") public static native @ByVal Status WriteStringToFile(Env env, @StdString String fname,
@StringPiece String data);
/** Reads contents of named file and parse as binary encoded proto data
* and store into {@code *proto}. */
@Namespace("tensorflow") public static native @ByVal Status ReadBinaryProto(Env env, @StdString BytePointer fname,
@Cast("tensorflow::protobuf::MessageLite*") Pointer proto);
@Namespace("tensorflow") public static native @ByVal Status ReadBinaryProto(Env env, @StdString String fname,
@Cast("tensorflow::protobuf::MessageLite*") Pointer proto);
// namespace register_file_system
// namespace tensorflow
// Register a FileSystem implementation for a scheme. Files with names that have
// "scheme://" prefixes are routed to use this implementation.
// #define REGISTER_FILE_SYSTEM_ENV(env, scheme, factory)
// REGISTER_FILE_SYSTEM_UNIQ_HELPER(__COUNTER__, env, scheme, factory)
// #define REGISTER_FILE_SYSTEM_UNIQ_HELPER(ctr, env, scheme, factory)
// REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory)
// #define REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory)
// static ::tensorflow::register_file_system::Register
// register_ff##ctr TF_ATTRIBUTE_UNUSED =
// ::tensorflow::register_file_system::Register(env, scheme)
// #define REGISTER_FILE_SYSTEM(scheme, factory)
// REGISTER_FILE_SYSTEM_ENV(Env::Default(), scheme, factory);
// #endif // TENSORFLOW_CORE_PLATFORM_ENV_H_
// Parsed from tensorflow/core/protobuf/config.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto__INCLUDED
// #include ...
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include ...
// #include "tensorflow/core/framework/step_stats.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto();
/** enum tensorflow::OptimizerOptions_Level */
public static final int
OptimizerOptions_Level_L1 = 0,
OptimizerOptions_Level_L0 = -1,
OptimizerOptions_Level_OptimizerOptions_Level_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
OptimizerOptions_Level_OptimizerOptions_Level_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions_Level") int OptimizerOptions_Level_Level_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::OptimizerOptions_Level") int OptimizerOptions_Level_Level_MAX();
@Namespace("tensorflow") @MemberGetter public static native int OptimizerOptions_Level_Level_ARRAYSIZE();
@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer OptimizerOptions_Level_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer OptimizerOptions_Level_Name(@Cast("tensorflow::OptimizerOptions_Level") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_Level*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
@StdString String name, @Cast("tensorflow::OptimizerOptions_Level*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_Level*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
@StdString String name, @Cast("tensorflow::OptimizerOptions_Level*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
@StdString BytePointer name, @Cast("tensorflow::OptimizerOptions_Level*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean OptimizerOptions_Level_Parse(
@StdString String name, @Cast("tensorflow::OptimizerOptions_Level*") int... value);
/** enum tensorflow::RunOptions_TraceLevel */
public static final int
RunOptions_TraceLevel_NO_TRACE = 0,
RunOptions_TraceLevel_FULL_TRACE = 1,
RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
RunOptions_TraceLevel_RunOptions_TraceLevel_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RunOptions_TraceLevel") int RunOptions_TraceLevel_TraceLevel_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::RunOptions_TraceLevel") int RunOptions_TraceLevel_TraceLevel_MAX();
@Namespace("tensorflow") @MemberGetter public static native int RunOptions_TraceLevel_TraceLevel_ARRAYSIZE();
@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer RunOptions_TraceLevel_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer RunOptions_TraceLevel_Name(@Cast("tensorflow::RunOptions_TraceLevel") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
@StdString BytePointer name, @Cast("tensorflow::RunOptions_TraceLevel*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
@StdString String name, @Cast("tensorflow::RunOptions_TraceLevel*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
@StdString BytePointer name, @Cast("tensorflow::RunOptions_TraceLevel*") int... value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
@StdString String name, @Cast("tensorflow::RunOptions_TraceLevel*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
@StdString BytePointer name, @Cast("tensorflow::RunOptions_TraceLevel*") IntBuffer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean RunOptions_TraceLevel_Parse(
@StdString String name, @Cast("tensorflow::RunOptions_TraceLevel*") int... value);
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class GPUOptions extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public GPUOptions(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public GPUOptions(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public GPUOptions position(long position) {
return (GPUOptions)super.position(position);
}
public GPUOptions() { super((Pointer)null); allocate(); }
private native void allocate();
public GPUOptions(@Const @ByRef GPUOptions from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef GPUOptions from);
public native @ByRef @Name("operator =") GPUOptions put(@Const @ByRef GPUOptions from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef GPUOptions default_instance();
public native void Swap(GPUOptions other);
// implements Message ----------------------------------------------
public native GPUOptions New();
public native GPUOptions New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef GPUOptions from);
public native void MergeFrom(@Const @ByRef GPUOptions from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional double per_process_gpu_memory_fraction = 1;
public native void clear_per_process_gpu_memory_fraction();
@MemberGetter public static native int kPerProcessGpuMemoryFractionFieldNumber();
public static final int kPerProcessGpuMemoryFractionFieldNumber = kPerProcessGpuMemoryFractionFieldNumber();
public native double per_process_gpu_memory_fraction();
public native void set_per_process_gpu_memory_fraction(double value);
// optional string allocator_type = 2;
public native void clear_allocator_type();
@MemberGetter public static native int kAllocatorTypeFieldNumber();
public static final int kAllocatorTypeFieldNumber = kAllocatorTypeFieldNumber();
public native @StdString BytePointer allocator_type();
public native void set_allocator_type(@StdString BytePointer value);
public native void set_allocator_type(@StdString String value);
public native void set_allocator_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_allocator_type(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_allocator_type();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_allocator_type();
public native void set_allocated_allocator_type(@StdString @Cast({"char*", "std::string*"}) BytePointer allocator_type);
// optional int64 deferred_deletion_bytes = 3;
public native void clear_deferred_deletion_bytes();
@MemberGetter public static native int kDeferredDeletionBytesFieldNumber();
public static final int kDeferredDeletionBytesFieldNumber = kDeferredDeletionBytesFieldNumber();
public native @Cast("google::protobuf::int64") long deferred_deletion_bytes();
public native void set_deferred_deletion_bytes(@Cast("google::protobuf::int64") long value);
// optional bool allow_growth = 4;
public native void clear_allow_growth();
@MemberGetter public static native int kAllowGrowthFieldNumber();
public static final int kAllowGrowthFieldNumber = kAllowGrowthFieldNumber();
public native @Cast("bool") boolean allow_growth();
public native void set_allow_growth(@Cast("bool") boolean value);
}
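// A hedged sketch of filling in GPUOptions. The values are illustrative; "BFC"
// names TensorFlow's best-fit-with-coalescing allocator and is assumed to be
// available in the build these bindings were generated from.
static GPUOptions exampleGpuOptions() {
    GPUOptions gpu = new GPUOptions();
    gpu.set_per_process_gpu_memory_fraction(0.5); // cap GPU memory at ~50% per process
    gpu.set_allow_growth(true);                   // grow the allocation on demand
    gpu.set_deferred_deletion_bytes(1 << 20);     // defer freeing up to 1 MB
    gpu.set_allocator_type("BFC");
    return gpu;
}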
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class OptimizerOptions extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OptimizerOptions(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public OptimizerOptions(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public OptimizerOptions position(long position) {
return (OptimizerOptions)super.position(position);
}
public OptimizerOptions() { super((Pointer)null); allocate(); }
private native void allocate();
public OptimizerOptions(@Const @ByRef OptimizerOptions from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef OptimizerOptions from);
public native @ByRef @Name("operator =") OptimizerOptions put(@Const @ByRef OptimizerOptions from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef OptimizerOptions default_instance();
public native void Swap(OptimizerOptions other);
// implements Message ----------------------------------------------
public native OptimizerOptions New();
public native OptimizerOptions New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef OptimizerOptions from);
public native void MergeFrom(@Const @ByRef OptimizerOptions from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
@MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int L1();
public static final int L1 = L1();
@MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int L0();
public static final int L0 = L0();
public static native @Cast("bool") boolean Level_IsValid(int value);
@MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int Level_MIN();
public static final int Level_MIN = Level_MIN();
@MemberGetter public static native @Cast("const tensorflow::OptimizerOptions::Level") int Level_MAX();
public static final int Level_MAX = Level_MAX();
@MemberGetter public static native int Level_ARRAYSIZE();
public static final int Level_ARRAYSIZE = Level_ARRAYSIZE();
public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer Level_descriptor();
public static native @StdString BytePointer Level_Name(@Cast("tensorflow::OptimizerOptions::Level") int value);
public static native @Cast("bool") boolean Level_Parse(@StdString BytePointer name,
@Cast("tensorflow::OptimizerOptions::Level*") IntPointer value);
public static native @Cast("bool") boolean Level_Parse(@StdString String name,
@Cast("tensorflow::OptimizerOptions::Level*") IntBuffer value);
public static native @Cast("bool") boolean Level_Parse(@StdString BytePointer name,
@Cast("tensorflow::OptimizerOptions::Level*") int... value);
public static native @Cast("bool") boolean Level_Parse(@StdString String name,
@Cast("tensorflow::OptimizerOptions::Level*") IntPointer value);
public static native @Cast("bool") boolean Level_Parse(@StdString BytePointer name,
@Cast("tensorflow::OptimizerOptions::Level*") IntBuffer value);
public static native @Cast("bool") boolean Level_Parse(@StdString String name,
@Cast("tensorflow::OptimizerOptions::Level*") int... value);
// accessors -------------------------------------------------------
// optional bool do_common_subexpression_elimination = 1;
public native void clear_do_common_subexpression_elimination();
@MemberGetter public static native int kDoCommonSubexpressionEliminationFieldNumber();
public static final int kDoCommonSubexpressionEliminationFieldNumber = kDoCommonSubexpressionEliminationFieldNumber();
public native @Cast("bool") boolean do_common_subexpression_elimination();
public native void set_do_common_subexpression_elimination(@Cast("bool") boolean value);
// optional bool do_constant_folding = 2;
public native void clear_do_constant_folding();
@MemberGetter public static native int kDoConstantFoldingFieldNumber();
public static final int kDoConstantFoldingFieldNumber = kDoConstantFoldingFieldNumber();
public native @Cast("bool") boolean do_constant_folding();
public native void set_do_constant_folding(@Cast("bool") boolean value);
// optional bool do_function_inlining = 4;
public native void clear_do_function_inlining();
@MemberGetter public static native int kDoFunctionInliningFieldNumber();
public static final int kDoFunctionInliningFieldNumber = kDoFunctionInliningFieldNumber();
public native @Cast("bool") boolean do_function_inlining();
public native void set_do_function_inlining(@Cast("bool") boolean value);
// optional .tensorflow.OptimizerOptions.Level opt_level = 3;
public native void clear_opt_level();
@MemberGetter public static native int kOptLevelFieldNumber();
public static final int kOptLevelFieldNumber = kOptLevelFieldNumber();
public native @Cast("tensorflow::OptimizerOptions_Level") int opt_level();
public native void set_opt_level(@Cast("tensorflow::OptimizerOptions_Level") int value);
}
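// A hedged sketch of configuring OptimizerOptions; the particular choices are
// illustrative. L1 (declared above) enables the basic graph optimizations, L0
// disables them.
static OptimizerOptions exampleOptimizerOptions() {
    OptimizerOptions optimizer = new OptimizerOptions();
    optimizer.set_do_common_subexpression_elimination(true);
    optimizer.set_do_constant_folding(true);
    optimizer.set_do_function_inlining(false);
    optimizer.set_opt_level(OptimizerOptions.L1);
    return optimizer;
}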
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class GraphOptions extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public GraphOptions(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public GraphOptions(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public GraphOptions position(long position) {
return (GraphOptions)super.position(position);
}
public GraphOptions() { super((Pointer)null); allocate(); }
private native void allocate();
public GraphOptions(@Const @ByRef GraphOptions from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef GraphOptions from);
public native @ByRef @Name("operator =") GraphOptions put(@Const @ByRef GraphOptions from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef GraphOptions default_instance();
public native void Swap(GraphOptions other);
// implements Message ----------------------------------------------
public native GraphOptions New();
public native GraphOptions New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef GraphOptions from);
public native void MergeFrom(@Const @ByRef GraphOptions from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional bool enable_recv_scheduling = 2;
public native void clear_enable_recv_scheduling();
@MemberGetter public static native int kEnableRecvSchedulingFieldNumber();
public static final int kEnableRecvSchedulingFieldNumber = kEnableRecvSchedulingFieldNumber();
public native @Cast("bool") boolean enable_recv_scheduling();
public native void set_enable_recv_scheduling(@Cast("bool") boolean value);
// optional .tensorflow.OptimizerOptions optimizer_options = 3;
public native @Cast("bool") boolean has_optimizer_options();
public native void clear_optimizer_options();
@MemberGetter public static native int kOptimizerOptionsFieldNumber();
public static final int kOptimizerOptionsFieldNumber = kOptimizerOptionsFieldNumber();
public native @Const @ByRef OptimizerOptions optimizer_options();
public native OptimizerOptions mutable_optimizer_options();
public native OptimizerOptions release_optimizer_options();
public native void set_allocated_optimizer_options(OptimizerOptions optimizer_options);
// optional bool build_cost_model = 4;
public native void clear_build_cost_model();
@MemberGetter public static native int kBuildCostModelFieldNumber();
public static final int kBuildCostModelFieldNumber = kBuildCostModelFieldNumber();
public native @Cast("bool") boolean build_cost_model();
public native void set_build_cost_model(@Cast("bool") boolean value);
}
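// A hedged sketch of composing GraphOptions with its nested OptimizerOptions via
// the generated mutable_* accessor; the settings are illustrative only.
static GraphOptions exampleGraphOptions() {
    GraphOptions graph = new GraphOptions();
    graph.mutable_optimizer_options().set_opt_level(OptimizerOptions.L1);
    graph.set_build_cost_model(true); // collect a cost model while the graph runs
    return graph;
}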
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class ConfigProto extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ConfigProto(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public ConfigProto(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public ConfigProto position(long position) {
return (ConfigProto)super.position(position);
}
public ConfigProto() { super((Pointer)null); allocate(); }
private native void allocate();
public ConfigProto(@Const @ByRef ConfigProto from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef ConfigProto from);
public native @ByRef @Name("operator =") ConfigProto put(@Const @ByRef ConfigProto from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef ConfigProto default_instance();
public native void Swap(ConfigProto other);
// implements Message ----------------------------------------------
public native ConfigProto New();
public native ConfigProto New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef ConfigProto from);
public native void MergeFrom(@Const @ByRef ConfigProto from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// map<string, int32> device_count = 1;
public native int device_count_size();
public native void clear_device_count();
@MemberGetter public static native int kDeviceCountFieldNumber();
public static final int kDeviceCountFieldNumber = kDeviceCountFieldNumber();
// optional int32 intra_op_parallelism_threads = 2;
public native void clear_intra_op_parallelism_threads();
@MemberGetter public static native int kIntraOpParallelismThreadsFieldNumber();
public static final int kIntraOpParallelismThreadsFieldNumber = kIntraOpParallelismThreadsFieldNumber();
public native @Cast("google::protobuf::int32") int intra_op_parallelism_threads();
public native void set_intra_op_parallelism_threads(@Cast("google::protobuf::int32") int value);
// optional int32 inter_op_parallelism_threads = 5;
public native void clear_inter_op_parallelism_threads();
@MemberGetter public static native int kInterOpParallelismThreadsFieldNumber();
public static final int kInterOpParallelismThreadsFieldNumber = kInterOpParallelismThreadsFieldNumber();
public native @Cast("google::protobuf::int32") int inter_op_parallelism_threads();
public native void set_inter_op_parallelism_threads(@Cast("google::protobuf::int32") int value);
// optional bool use_per_session_threads = 9;
public native void clear_use_per_session_threads();
@MemberGetter public static native int kUsePerSessionThreadsFieldNumber();
public static final int kUsePerSessionThreadsFieldNumber = kUsePerSessionThreadsFieldNumber();
public native @Cast("bool") boolean use_per_session_threads();
public native void set_use_per_session_threads(@Cast("bool") boolean value);
// optional int32 placement_period = 3;
public native void clear_placement_period();
@MemberGetter public static native int kPlacementPeriodFieldNumber();
public static final int kPlacementPeriodFieldNumber = kPlacementPeriodFieldNumber();
public native @Cast("google::protobuf::int32") int placement_period();
public native void set_placement_period(@Cast("google::protobuf::int32") int value);
// repeated string device_filters = 4;
public native int device_filters_size();
public native void clear_device_filters();
@MemberGetter public static native int kDeviceFiltersFieldNumber();
public static final int kDeviceFiltersFieldNumber = kDeviceFiltersFieldNumber();
public native @StdString BytePointer device_filters(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device_filters(int index);
public native void set_device_filters(int index, @StdString BytePointer value);
public native void set_device_filters(int index, @StdString String value);
public native void set_device_filters(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_device_filters(int index, String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_device_filters();
public native void add_device_filters(@StdString BytePointer value);
public native void add_device_filters(@StdString String value);
public native void add_device_filters(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void add_device_filters(String value, @Cast("size_t") long size);
// optional .tensorflow.GPUOptions gpu_options = 6;
public native @Cast("bool") boolean has_gpu_options();
public native void clear_gpu_options();
@MemberGetter public static native int kGpuOptionsFieldNumber();
public static final int kGpuOptionsFieldNumber = kGpuOptionsFieldNumber();
public native @Const @ByRef GPUOptions gpu_options();
public native GPUOptions mutable_gpu_options();
public native GPUOptions release_gpu_options();
public native void set_allocated_gpu_options(GPUOptions gpu_options);
// optional bool allow_soft_placement = 7;
public native void clear_allow_soft_placement();
@MemberGetter public static native int kAllowSoftPlacementFieldNumber();
public static final int kAllowSoftPlacementFieldNumber = kAllowSoftPlacementFieldNumber();
public native @Cast("bool") boolean allow_soft_placement();
public native void set_allow_soft_placement(@Cast("bool") boolean value);
// optional bool log_device_placement = 8;
public native void clear_log_device_placement();
@MemberGetter public static native int kLogDevicePlacementFieldNumber();
public static final int kLogDevicePlacementFieldNumber = kLogDevicePlacementFieldNumber();
public native @Cast("bool") boolean log_device_placement();
public native void set_log_device_placement(@Cast("bool") boolean value);
// optional .tensorflow.GraphOptions graph_options = 10;
public native @Cast("bool") boolean has_graph_options();
public native void clear_graph_options();
@MemberGetter public static native int kGraphOptionsFieldNumber();
public static final int kGraphOptionsFieldNumber = kGraphOptionsFieldNumber();
public native @Const @ByRef GraphOptions graph_options();
public native GraphOptions mutable_graph_options();
public native GraphOptions release_graph_options();
public native void set_allocated_graph_options(GraphOptions graph_options);
// optional int64 operation_timeout_in_ms = 11;
public native void clear_operation_timeout_in_ms();
@MemberGetter public static native int kOperationTimeoutInMsFieldNumber();
public static final int kOperationTimeoutInMsFieldNumber = kOperationTimeoutInMsFieldNumber();
public native @Cast("google::protobuf::int64") long operation_timeout_in_ms();
public native void set_operation_timeout_in_ms(@Cast("google::protobuf::int64") long value);
}
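// A hedged sketch of building a ConfigProto and serializing it with the generated
// protobuf methods. The thread counts and device filter are illustrative only;
// ByteSize() is called first so the cached sizes consumed by
// SerializeWithCachedSizesToArray() are up to date.
static byte[] exampleConfigProto() {
    ConfigProto config = new ConfigProto();
    config.set_allow_soft_placement(true);       // fall back to CPU when no GPU kernel exists
    config.set_log_device_placement(false);
    config.set_intra_op_parallelism_threads(4);
    config.set_inter_op_parallelism_threads(2);
    config.add_device_filters("/job:localhost"); // hypothetical device filter
    config.mutable_gpu_options().set_allow_growth(true);
    config.set_operation_timeout_in_ms(60000);

    byte[] serialized = new byte[config.ByteSize()];
    config.SerializeWithCachedSizesToArray(serialized);
    return serialized;
}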
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class RunOptions extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public RunOptions(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public RunOptions(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public RunOptions position(long position) {
return (RunOptions)super.position(position);
}
public RunOptions() { super((Pointer)null); allocate(); }
private native void allocate();
public RunOptions(@Const @ByRef RunOptions from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef RunOptions from);
public native @ByRef @Name("operator =") RunOptions put(@Const @ByRef RunOptions from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef RunOptions default_instance();
public native void Swap(RunOptions other);
// implements Message ----------------------------------------------
public native RunOptions New();
public native RunOptions New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef RunOptions from);
public native void MergeFrom(@Const @ByRef RunOptions from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
@MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int NO_TRACE();
public static final int NO_TRACE = NO_TRACE();
@MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int FULL_TRACE();
public static final int FULL_TRACE = FULL_TRACE();
public static native @Cast("bool") boolean TraceLevel_IsValid(int value);
@MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int TraceLevel_MIN();
public static final int TraceLevel_MIN = TraceLevel_MIN();
@MemberGetter public static native @Cast("const tensorflow::RunOptions::TraceLevel") int TraceLevel_MAX();
public static final int TraceLevel_MAX = TraceLevel_MAX();
@MemberGetter public static native int TraceLevel_ARRAYSIZE();
public static final int TraceLevel_ARRAYSIZE = TraceLevel_ARRAYSIZE();
public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer TraceLevel_descriptor();
public static native @StdString BytePointer TraceLevel_Name(@Cast("tensorflow::RunOptions::TraceLevel") int value);
public static native @Cast("bool") boolean TraceLevel_Parse(@StdString BytePointer name,
@Cast("tensorflow::RunOptions::TraceLevel*") IntPointer value);
public static native @Cast("bool") boolean TraceLevel_Parse(@StdString String name,
@Cast("tensorflow::RunOptions::TraceLevel*") IntBuffer value);
public static native @Cast("bool") boolean TraceLevel_Parse(@StdString BytePointer name,
@Cast("tensorflow::RunOptions::TraceLevel*") int... value);
public static native @Cast("bool") boolean TraceLevel_Parse(@StdString String name,
@Cast("tensorflow::RunOptions::TraceLevel*") IntPointer value);
public static native @Cast("bool") boolean TraceLevel_Parse(@StdString BytePointer name,
@Cast("tensorflow::RunOptions::TraceLevel*") IntBuffer value);
public static native @Cast("bool") boolean TraceLevel_Parse(@StdString String name,
@Cast("tensorflow::RunOptions::TraceLevel*") int... value);
// accessors -------------------------------------------------------
// optional .tensorflow.RunOptions.TraceLevel trace_level = 1;
public native void clear_trace_level();
@MemberGetter public static native int kTraceLevelFieldNumber();
public static final int kTraceLevelFieldNumber = kTraceLevelFieldNumber();
public native @Cast("tensorflow::RunOptions_TraceLevel") int trace_level();
public native void set_trace_level(@Cast("tensorflow::RunOptions_TraceLevel") int value);
// optional int64 timeout_in_ms = 2;
public native void clear_timeout_in_ms();
@MemberGetter public static native int kTimeoutInMsFieldNumber();
public static final int kTimeoutInMsFieldNumber = kTimeoutInMsFieldNumber();
public native @Cast("google::protobuf::int64") long timeout_in_ms();
public native void set_timeout_in_ms(@Cast("google::protobuf::int64") long value);
}
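// A hedged sketch of per-step RunOptions; FULL_TRACE requests step statistics and
// the timeout value is illustrative.
static RunOptions exampleRunOptions() {
    RunOptions run = new RunOptions();
    run.set_trace_level(RunOptions.FULL_TRACE); // collect a full trace for this step
    run.set_timeout_in_ms(10000);               // abort the step after ~10 seconds
    return run;
}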
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class RunMetadata extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public RunMetadata(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public RunMetadata(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public RunMetadata position(long position) {
return (RunMetadata)super.position(position);
}
public RunMetadata() { super((Pointer)null); allocate(); }
private native void allocate();
public RunMetadata(@Const @ByRef RunMetadata from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef RunMetadata from);
public native @ByRef @Name("operator =") RunMetadata put(@Const @ByRef RunMetadata from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef RunMetadata default_instance();
public native void Swap(RunMetadata other);
// implements Message ----------------------------------------------
public native RunMetadata New();
public native RunMetadata New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef RunMetadata from);
public native void MergeFrom(@Const @ByRef RunMetadata from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional .tensorflow.StepStats step_stats = 1;
public native @Cast("bool") boolean has_step_stats();
public native void clear_step_stats();
@MemberGetter public static native int kStepStatsFieldNumber();
public static final int kStepStatsFieldNumber = kStepStatsFieldNumber();
public native @Const @ByRef StepStats step_stats();
public native StepStats mutable_step_stats();
public native StepStats release_step_stats();
public native void set_allocated_step_stats(StepStats step_stats);
}
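// A hedged sketch of inspecting a RunMetadata after a traced run has filled it in.
// How the metadata gets populated (for example, by a session run) is outside this
// excerpt; StepStats and its accessors are declared with the step_stats.pb.h
// section further below.
static void exampleRunMetadata(RunMetadata metadata) {
    if (metadata.has_step_stats()) {
        StepStats stats = metadata.step_stats();
        // walk the per-device node statistics exposed by StepStats here
    }
}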
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// GPUOptions
// optional double per_process_gpu_memory_fraction = 1;
// optional string allocator_type = 2;
// optional int64 deferred_deletion_bytes = 3;
// optional bool allow_growth = 4;
// -------------------------------------------------------------------
// OptimizerOptions
// optional bool do_common_subexpression_elimination = 1;
// optional bool do_constant_folding = 2;
// optional bool do_function_inlining = 4;
// optional .tensorflow.OptimizerOptions.Level opt_level = 3;
// -------------------------------------------------------------------
// GraphOptions
// optional bool enable_recv_scheduling = 2;
// optional .tensorflow.OptimizerOptions optimizer_options = 3;
// optional bool build_cost_model = 4;
// -------------------------------------------------------------------
// ConfigProto
// map<string, int32> device_count = 1;
// optional int32 intra_op_parallelism_threads = 2;
// optional int32 inter_op_parallelism_threads = 5;
// optional bool use_per_session_threads = 9;
// optional int32 placement_period = 3;
// repeated string device_filters = 4;
// optional .tensorflow.GPUOptions gpu_options = 6;
// optional bool allow_soft_placement = 7;
// optional bool log_device_placement = 8;
// optional .tensorflow.GraphOptions graph_options = 10;
// optional int64 operation_timeout_in_ms = 11;
// -------------------------------------------------------------------
// RunOptions
// optional .tensorflow.RunOptions.TraceLevel trace_level = 1;
// optional int64 timeout_in_ms = 2;
// -------------------------------------------------------------------
// RunMetadata
// optional .tensorflow.StepStats step_stats = 1;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// #ifndef SWIG
// #endif // SWIG
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/step_stats.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/step_stats.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto__INCLUDED
// #include ...
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include ...
// #include "tensorflow/core/framework/allocation_description.pb.h"
// #include "tensorflow/core/framework/tensor_description.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class AllocatorMemoryUsed extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AllocatorMemoryUsed(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AllocatorMemoryUsed(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public AllocatorMemoryUsed position(long position) {
return (AllocatorMemoryUsed)super.position(position);
}
public AllocatorMemoryUsed() { super((Pointer)null); allocate(); }
private native void allocate();
public AllocatorMemoryUsed(@Const @ByRef AllocatorMemoryUsed from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef AllocatorMemoryUsed from);
public native @ByRef @Name("operator =") AllocatorMemoryUsed put(@Const @ByRef AllocatorMemoryUsed from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef AllocatorMemoryUsed default_instance();
public native void Swap(AllocatorMemoryUsed other);
// implements Message ----------------------------------------------
public native AllocatorMemoryUsed New();
public native AllocatorMemoryUsed New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef AllocatorMemoryUsed from);
public native void MergeFrom(@Const @ByRef AllocatorMemoryUsed from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string allocator_name = 1;
public native void clear_allocator_name();
@MemberGetter public static native int kAllocatorNameFieldNumber();
public static final int kAllocatorNameFieldNumber = kAllocatorNameFieldNumber();
public native @StdString BytePointer allocator_name();
public native void set_allocator_name(@StdString BytePointer value);
public native void set_allocator_name(@StdString String value);
public native void set_allocator_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_allocator_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_allocator_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_allocator_name();
public native void set_allocated_allocator_name(@StdString @Cast({"char*", "std::string*"}) BytePointer allocator_name);
// optional int64 total_bytes = 2;
public native void clear_total_bytes();
@MemberGetter public static native int kTotalBytesFieldNumber();
public static final int kTotalBytesFieldNumber = kTotalBytesFieldNumber();
public native @Cast("google::protobuf::int64") long total_bytes();
public native void set_total_bytes(@Cast("google::protobuf::int64") long value);
// optional int64 peak_bytes = 3;
public native void clear_peak_bytes();
@MemberGetter public static native int kPeakBytesFieldNumber();
public static final int kPeakBytesFieldNumber = kPeakBytesFieldNumber();
public native @Cast("google::protobuf::int64") long peak_bytes();
public native void set_peak_bytes(@Cast("google::protobuf::int64") long value);
}
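// Usage sketch (not part of the generated bindings): fill in an
// AllocatorMemoryUsed message using only the accessors declared above.
// The allocator name and byte counts are placeholder values.
public static AllocatorMemoryUsed exampleAllocatorMemoryUsed() {
    AllocatorMemoryUsed used = new AllocatorMemoryUsed();
    used.set_allocator_name("cpu");   // optional string allocator_name = 1
    used.set_total_bytes(64 * 1024);  // optional int64 total_bytes = 2
    used.set_peak_bytes(128 * 1024);  // optional int64 peak_bytes = 3
    return used;
}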
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class NodeOutput extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NodeOutput(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public NodeOutput(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public NodeOutput position(long position) {
return (NodeOutput)super.position(position);
}
public NodeOutput() { super((Pointer)null); allocate(); }
private native void allocate();
public NodeOutput(@Const @ByRef NodeOutput from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef NodeOutput from);
public native @ByRef @Name("operator =") NodeOutput put(@Const @ByRef NodeOutput from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef NodeOutput default_instance();
public native void Swap(NodeOutput other);
// implements Message ----------------------------------------------
public native NodeOutput New();
public native NodeOutput New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef NodeOutput from);
public native void MergeFrom(@Const @ByRef NodeOutput from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int32 slot = 1;
public native void clear_slot();
@MemberGetter public static native int kSlotFieldNumber();
public static final int kSlotFieldNumber = kSlotFieldNumber();
public native @Cast("google::protobuf::int32") int slot();
public native void set_slot(@Cast("google::protobuf::int32") int value);
// optional .tensorflow.TensorDescription tensor_description = 3;
public native @Cast("bool") boolean has_tensor_description();
public native void clear_tensor_description();
@MemberGetter public static native int kTensorDescriptionFieldNumber();
public static final int kTensorDescriptionFieldNumber = kTensorDescriptionFieldNumber();
public native @Const @ByRef TensorDescription tensor_description();
public native TensorDescription mutable_tensor_description();
public native TensorDescription release_tensor_description();
public native void set_allocated_tensor_description(TensorDescription tensor_description);
}
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class NodeExecStats extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NodeExecStats(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public NodeExecStats(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public NodeExecStats position(long position) {
return (NodeExecStats)super.position(position);
}
public NodeExecStats() { super((Pointer)null); allocate(); }
private native void allocate();
public NodeExecStats(@Const @ByRef NodeExecStats from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef NodeExecStats from);
public native @ByRef @Name("operator =") NodeExecStats put(@Const @ByRef NodeExecStats from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef NodeExecStats default_instance();
public native void Swap(NodeExecStats other);
// implements Message ----------------------------------------------
public native NodeExecStats New();
public native NodeExecStats New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef NodeExecStats from);
public native void MergeFrom(@Const @ByRef NodeExecStats from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string node_name = 1;
public native void clear_node_name();
@MemberGetter public static native int kNodeNameFieldNumber();
public static final int kNodeNameFieldNumber = kNodeNameFieldNumber();
public native @StdString BytePointer node_name();
public native void set_node_name(@StdString BytePointer value);
public native void set_node_name(@StdString String value);
public native void set_node_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_node_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_node_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_node_name();
public native void set_allocated_node_name(@StdString @Cast({"char*", "std::string*"}) BytePointer node_name);
// optional int64 all_start_micros = 2;
public native void clear_all_start_micros();
@MemberGetter public static native int kAllStartMicrosFieldNumber();
public static final int kAllStartMicrosFieldNumber = kAllStartMicrosFieldNumber();
public native @Cast("google::protobuf::int64") long all_start_micros();
public native void set_all_start_micros(@Cast("google::protobuf::int64") long value);
// optional int64 op_start_rel_micros = 3;
public native void clear_op_start_rel_micros();
@MemberGetter public static native int kOpStartRelMicrosFieldNumber();
public static final int kOpStartRelMicrosFieldNumber = kOpStartRelMicrosFieldNumber();
public native @Cast("google::protobuf::int64") long op_start_rel_micros();
public native void set_op_start_rel_micros(@Cast("google::protobuf::int64") long value);
// optional int64 op_end_rel_micros = 4;
public native void clear_op_end_rel_micros();
@MemberGetter public static native int kOpEndRelMicrosFieldNumber();
public static final int kOpEndRelMicrosFieldNumber = kOpEndRelMicrosFieldNumber();
public native @Cast("google::protobuf::int64") long op_end_rel_micros();
public native void set_op_end_rel_micros(@Cast("google::protobuf::int64") long value);
// optional int64 all_end_rel_micros = 5;
public native void clear_all_end_rel_micros();
@MemberGetter public static native int kAllEndRelMicrosFieldNumber();
public static final int kAllEndRelMicrosFieldNumber = kAllEndRelMicrosFieldNumber();
public native @Cast("google::protobuf::int64") long all_end_rel_micros();
public native void set_all_end_rel_micros(@Cast("google::protobuf::int64") long value);
// repeated .tensorflow.AllocatorMemoryUsed memory = 6;
public native int memory_size();
public native void clear_memory();
@MemberGetter public static native int kMemoryFieldNumber();
public static final int kMemoryFieldNumber = kMemoryFieldNumber();
public native @Const @ByRef AllocatorMemoryUsed memory(int index);
public native AllocatorMemoryUsed mutable_memory(int index);
public native AllocatorMemoryUsed add_memory();
// repeated .tensorflow.NodeOutput output = 7;
public native int output_size();
public native void clear_output();
@MemberGetter public static native int kOutputFieldNumber();
public static final int kOutputFieldNumber = kOutputFieldNumber();
public native @Const @ByRef NodeOutput output(int index);
public native NodeOutput mutable_output(int index);
public native NodeOutput add_output();
// optional string timeline_label = 8;
public native void clear_timeline_label();
@MemberGetter public static native int kTimelineLabelFieldNumber();
public static final int kTimelineLabelFieldNumber = kTimelineLabelFieldNumber();
public native @StdString BytePointer timeline_label();
public native void set_timeline_label(@StdString BytePointer value);
public native void set_timeline_label(@StdString String value);
public native void set_timeline_label(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_timeline_label(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_timeline_label();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_timeline_label();
public native void set_allocated_timeline_label(@StdString @Cast({"char*", "std::string*"}) BytePointer timeline_label);
// optional int64 scheduled_micros = 9;
public native void clear_scheduled_micros();
@MemberGetter public static native int kScheduledMicrosFieldNumber();
public static final int kScheduledMicrosFieldNumber = kScheduledMicrosFieldNumber();
public native @Cast("google::protobuf::int64") long scheduled_micros();
public native void set_scheduled_micros(@Cast("google::protobuf::int64") long value);
// optional uint32 thread_id = 10;
public native void clear_thread_id();
@MemberGetter public static native int kThreadIdFieldNumber();
public static final int kThreadIdFieldNumber = kThreadIdFieldNumber();
public native @Cast("google::protobuf::uint32") int thread_id();
public native void set_thread_id(@Cast("google::protobuf::uint32") int value);
// repeated .tensorflow.AllocationDescription referenced_tensor = 11;
public native int referenced_tensor_size();
public native void clear_referenced_tensor();
@MemberGetter public static native int kReferencedTensorFieldNumber();
public static final int kReferencedTensorFieldNumber = kReferencedTensorFieldNumber();
public native @Const @ByRef AllocationDescription referenced_tensor(int index);
public native AllocationDescription mutable_referenced_tensor(int index);
public native AllocationDescription add_referenced_tensor();
}
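// Usage sketch (not part of the generated bindings): assemble a NodeExecStats
// record by hand with the mutators declared above, the way a profiler would
// before attaching it to a DeviceStepStats. All values are placeholders.
public static NodeExecStats exampleNodeExecStats() {
    NodeExecStats node = new NodeExecStats();
    node.set_node_name("MatMul_1");     // optional string node_name = 1
    node.set_all_start_micros(1000);    // absolute start time of the node, in micros
    node.set_op_end_rel_micros(40);     // op finished 40us after all_start_micros
    node.set_all_end_rel_micros(50);    // node fully done 50us after all_start_micros
    node.add_output().set_slot(0);      // repeated .tensorflow.NodeOutput output = 7
    AllocatorMemoryUsed mem = node.add_memory();  // repeated AllocatorMemoryUsed memory = 6
    mem.set_allocator_name("cpu");
    mem.set_total_bytes(4096);
    mem.set_peak_bytes(4096);
    return node;
}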
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class DeviceStepStats extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public DeviceStepStats(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public DeviceStepStats(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public DeviceStepStats position(long position) {
return (DeviceStepStats)super.position(position);
}
public DeviceStepStats() { super((Pointer)null); allocate(); }
private native void allocate();
public DeviceStepStats(@Const @ByRef DeviceStepStats from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef DeviceStepStats from);
public native @ByRef @Name("operator =") DeviceStepStats put(@Const @ByRef DeviceStepStats from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef DeviceStepStats default_instance();
public native void Swap(DeviceStepStats other);
// implements Message ----------------------------------------------
public native DeviceStepStats New();
public native DeviceStepStats New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef DeviceStepStats from);
public native void MergeFrom(@Const @ByRef DeviceStepStats from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string device = 1;
public native void clear_device();
@MemberGetter public static native int kDeviceFieldNumber();
public static final int kDeviceFieldNumber = kDeviceFieldNumber();
public native @StdString BytePointer device();
public native void set_device(@StdString BytePointer value);
public native void set_device(@StdString String value);
public native void set_device(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_device(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device();
public native void set_allocated_device(@StdString @Cast({"char*", "std::string*"}) BytePointer device);
// repeated .tensorflow.NodeExecStats node_stats = 2;
public native int node_stats_size();
public native void clear_node_stats();
@MemberGetter public static native int kNodeStatsFieldNumber();
public static final int kNodeStatsFieldNumber = kNodeStatsFieldNumber();
public native @Const @ByRef NodeExecStats node_stats(int index);
public native NodeExecStats mutable_node_stats(int index);
public native NodeExecStats add_node_stats();
}
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class StepStats extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public StepStats(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public StepStats(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public StepStats position(long position) {
return (StepStats)super.position(position);
}
public StepStats() { super((Pointer)null); allocate(); }
private native void allocate();
public StepStats(@Const @ByRef StepStats from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef StepStats from);
public native @ByRef @Name("operator =") StepStats put(@Const @ByRef StepStats from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef StepStats default_instance();
public native void Swap(StepStats other);
// implements Message ----------------------------------------------
public native StepStats New();
public native StepStats New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef StepStats from);
public native void MergeFrom(@Const @ByRef StepStats from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.DeviceStepStats dev_stats = 1;
public native int dev_stats_size();
public native void clear_dev_stats();
@MemberGetter public static native int kDevStatsFieldNumber();
public static final int kDevStatsFieldNumber = kDevStatsFieldNumber();
public native @Const @ByRef DeviceStepStats dev_stats(int index);
public native DeviceStepStats mutable_dev_stats(int index);
public native DeviceStepStats add_dev_stats();
}
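// Usage sketch (not part of the generated bindings): walk a StepStats message
// collected from a profiled run, using only the repeated-field accessors
// declared above (dev_stats, node_stats, memory). The StepStats instance is
// assumed to have been filled in elsewhere, e.g. by the runtime.
public static void printStepStats(StepStats stats) {
    for (int d = 0; d < stats.dev_stats_size(); d++) {
        DeviceStepStats dev = stats.dev_stats(d);
        System.out.println("device: " + dev.device().getString());
        for (int n = 0; n < dev.node_stats_size(); n++) {
            NodeExecStats node = dev.node_stats(n);
            System.out.println("  node " + node.node_name().getString()
                    + " start=" + node.all_start_micros()
                    + "us dur=" + node.all_end_rel_micros() + "us");
            for (int m = 0; m < node.memory_size(); m++) {
                AllocatorMemoryUsed mem = node.memory(m);
                System.out.println("    " + mem.allocator_name().getString()
                        + " total=" + mem.total_bytes()
                        + " peak=" + mem.peak_bytes());
            }
        }
    }
}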
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// AllocatorMemoryUsed
// optional string allocator_name = 1;
// optional int64 total_bytes = 2;
// optional int64 peak_bytes = 3;
// -------------------------------------------------------------------
// NodeOutput
// optional int32 slot = 1;
// optional .tensorflow.TensorDescription tensor_description = 3;
// -------------------------------------------------------------------
// NodeExecStats
// optional string node_name = 1;
// optional int64 all_start_micros = 2;
// optional int64 op_start_rel_micros = 3;
// optional int64 op_end_rel_micros = 4;
// optional int64 all_end_rel_micros = 5;
// repeated .tensorflow.AllocatorMemoryUsed memory = 6;
// repeated .tensorflow.NodeOutput output = 7;
// optional string timeline_label = 8;
// optional int64 scheduled_micros = 9;
// optional uint32 thread_id = 10;
// repeated .tensorflow.AllocationDescription referenced_tensor = 11;
// -------------------------------------------------------------------
// DeviceStepStats
// optional string device = 1;
// repeated .tensorflow.NodeExecStats node_stats = 2;
// -------------------------------------------------------------------
// StepStats
// repeated .tensorflow.DeviceStepStats dev_stats = 1;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/versions.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/versions.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2fversions_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2fversions_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2fversions_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2fversions_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2fversions_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class VersionDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VersionDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public VersionDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public VersionDef position(long position) {
return (VersionDef)super.position(position);
}
public VersionDef() { super((Pointer)null); allocate(); }
private native void allocate();
public VersionDef(@Const @ByRef VersionDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef VersionDef from);
public native @ByRef @Name("operator =") VersionDef put(@Const @ByRef VersionDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef VersionDef default_instance();
public native void Swap(VersionDef other);
// implements Message ----------------------------------------------
public native VersionDef New();
public native VersionDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef VersionDef from);
public native void MergeFrom(@Const @ByRef VersionDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int32 producer = 1;
public native void clear_producer();
@MemberGetter public static native int kProducerFieldNumber();
public static final int kProducerFieldNumber = kProducerFieldNumber();
public native @Cast("google::protobuf::int32") int producer();
public native void set_producer(@Cast("google::protobuf::int32") int value);
// optional int32 min_consumer = 2;
public native void clear_min_consumer();
@MemberGetter public static native int kMinConsumerFieldNumber();
public static final int kMinConsumerFieldNumber = kMinConsumerFieldNumber();
public native @Cast("google::protobuf::int32") int min_consumer();
public native void set_min_consumer(@Cast("google::protobuf::int32") int value);
// repeated int32 bad_consumers = 3;
public native int bad_consumers_size();
public native void clear_bad_consumers();
@MemberGetter public static native int kBadConsumersFieldNumber();
public static final int kBadConsumersFieldNumber = kBadConsumersFieldNumber();
public native @Cast("google::protobuf::int32") int bad_consumers(int index);
public native void set_bad_consumers(int index, @Cast("google::protobuf::int32") int value);
public native void add_bad_consumers(@Cast("google::protobuf::int32") int value);
}
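// Usage sketch (not part of the generated bindings): populate a VersionDef
// and read back the repeated bad_consumers field through the accessors above.
// The version numbers are placeholder values.
public static VersionDef exampleVersionDef() {
    VersionDef versions = new VersionDef();
    versions.set_producer(21);       // version of the code that produced the data
    versions.set_min_consumer(0);    // oldest consumer version allowed to read it
    versions.add_bad_consumers(19);  // repeated int32 bad_consumers = 3
    for (int i = 0; i < versions.bad_consumers_size(); i++) {
        System.out.println("bad consumer: " + versions.bad_consumers(i));
    }
    return versions;
}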
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// VersionDef
// optional int32 producer = 1;
// optional int32 min_consumer = 2;
// repeated int32 bad_consumers = 3;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2fversions_2eproto__INCLUDED
// Parsed from tensorflow/core/public/session_options.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_
// #define TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_
// #include
// #include "tensorflow/core/platform/types.h"
// #include "tensorflow/core/protobuf/config.pb.h"
/** Configuration information for a Session. */
@Namespace("tensorflow") @NoOffset public static class SessionOptions extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public SessionOptions(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public SessionOptions(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public SessionOptions position(long position) {
return (SessionOptions)super.position(position);
}
/** The environment to use. */
///
///
///
///
///
public native Env env(); public native SessionOptions env(Env env);
/** \brief The TensorFlow runtime to connect to.
*
* If 'target' is empty or unspecified, the local TensorFlow runtime
* implementation will be used. Otherwise, the TensorFlow engine
* defined by 'target' will be used to perform all computations.
*
* "target" can be either a single entry or a comma separated list
* of entries. Each entry is a resolvable address of the
* following format:
* local
* ip:port
* host:port
* ... other system-specific formats to identify tasks and jobs ...
*
* NOTE: at the moment 'local' maps to an in-process service-based
* runtime.
*
* Upon creation, a single session affines itself to one of the
* remote processes, with possible load balancing choices when the
* "target" resolves to a list of possible processes.
*
* If the session disconnects from the remote process during its
* lifetime, session calls may fail immediately. */
public native @StdString BytePointer target(); public native SessionOptions target(BytePointer target);
/** Configuration options. */
public native @ByRef ConfigProto config(); public native SessionOptions config(ConfigProto config);
public SessionOptions() { super((Pointer)null); allocate(); }
private native void allocate();
}
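// Usage sketch (not part of the generated bindings): construct SessionOptions
// and point it at the in-process runtime via the "local" target described in
// the comment above; config() exposes the underlying ConfigProto by reference.
public static SessionOptions exampleSessionOptions() {
    SessionOptions options = new SessionOptions();
    options.target(new BytePointer("local"));  // empty or "local" selects the local runtime
    ConfigProto config = options.config();     // tune ConfigProto fields here if needed
    return options;
}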
// end namespace tensorflow
// #endif // TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_
// Parsed from tensorflow/core/lib/core/threadpool.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_LIB_CORE_THREADPOOL_H_
// #define TENSORFLOW_LIB_CORE_THREADPOOL_H_
// #include
// #include
// #include "tensorflow/core/platform/env.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h"
@Namespace("tensorflow::thread") @NoOffset public static class ThreadPool extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ThreadPool(Pointer p) { super(p); }
// Construct a pool that contains "num_threads" threads with specified "name".
// env->StartThread() is used to create individual threads.
//
// REQUIRES: num_threads > 0
public ThreadPool(Env env, @StdString BytePointer name, int num_threads) { super((Pointer)null); allocate(env, name, num_threads); }
private native void allocate(Env env, @StdString BytePointer name, int num_threads);
public ThreadPool(Env env, @StdString String name, int num_threads) { super((Pointer)null); allocate(env, name, num_threads); }
private native void allocate(Env env, @StdString String name, int num_threads);
// Construct a pool that contains "num_threads" threads with specified "name".
// env->StartThread() is used to create individual threads.
//
// REQUIRES: num_threads > 0
public ThreadPool(Env env, @Const @ByRef ThreadOptions thread_options, @StdString BytePointer name,
int num_threads) { super((Pointer)null); allocate(env, thread_options, name, num_threads); }
private native void allocate(Env env, @Const @ByRef ThreadOptions thread_options, @StdString BytePointer name,
int num_threads);
public ThreadPool(Env env, @Const @ByRef ThreadOptions thread_options, @StdString String name,
int num_threads) { super((Pointer)null); allocate(env, thread_options, name, num_threads); }
private native void allocate(Env env, @Const @ByRef ThreadOptions thread_options, @StdString String name,
int num_threads);
// Wait until all scheduled work has finished and then destroy the
// set of threads.
// Schedule fn() for execution in the pool of threads.
public native void Schedule(@ByVal Fn fn);
@Opaque public static class Impl extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public Impl() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Impl(Pointer p) { super(p); }
}
}
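// Usage sketch (not part of the generated bindings): construct a ThreadPool
// from an Env supplied by the caller. Work would then be submitted through
// Schedule(Fn); the Fn functor type is declared elsewhere in these bindings
// and is not shown here.
public static ThreadPool exampleThreadPool(Env env) {
    // REQUIRES (per the comment above): num_threads > 0
    return new ThreadPool(env, "example_pool", 4);
}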
// namespace thread
// namespace tensorflow
// #endif // TENSORFLOW_LIB_CORE_THREADPOOL_H_
// Parsed from tensorflow/core/framework/allocation_description.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/allocation_description.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class AllocationDescription extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AllocationDescription(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AllocationDescription(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public AllocationDescription position(long position) {
return (AllocationDescription)super.position(position);
}
public AllocationDescription() { super((Pointer)null); allocate(); }
private native void allocate();
public AllocationDescription(@Const @ByRef AllocationDescription from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef AllocationDescription from);
public native @ByRef @Name("operator =") AllocationDescription put(@Const @ByRef AllocationDescription from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef AllocationDescription default_instance();
public native void Swap(AllocationDescription other);
// implements Message ----------------------------------------------
public native AllocationDescription New();
public native AllocationDescription New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef AllocationDescription from);
public native void MergeFrom(@Const @ByRef AllocationDescription from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int64 requested_bytes = 1;
public native void clear_requested_bytes();
@MemberGetter public static native int kRequestedBytesFieldNumber();
public static final int kRequestedBytesFieldNumber = kRequestedBytesFieldNumber();
public native @Cast("google::protobuf::int64") long requested_bytes();
public native void set_requested_bytes(@Cast("google::protobuf::int64") long value);
// optional int64 allocated_bytes = 2;
public native void clear_allocated_bytes();
@MemberGetter public static native int kAllocatedBytesFieldNumber();
public static final int kAllocatedBytesFieldNumber = kAllocatedBytesFieldNumber();
public native @Cast("google::protobuf::int64") long allocated_bytes();
public native void set_allocated_bytes(@Cast("google::protobuf::int64") long value);
// optional string allocator_name = 3;
public native void clear_allocator_name();
@MemberGetter public static native int kAllocatorNameFieldNumber();
public static final int kAllocatorNameFieldNumber = kAllocatorNameFieldNumber();
public native @StdString BytePointer allocator_name();
public native void set_allocator_name(@StdString BytePointer value);
public native void set_allocator_name(@StdString String value);
public native void set_allocator_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_allocator_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_allocator_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_allocator_name();
public native void set_allocated_allocator_name(@StdString @Cast({"char*", "std::string*"}) BytePointer allocator_name);
// optional int64 allocation_id = 4;
public native void clear_allocation_id();
@MemberGetter public static native int kAllocationIdFieldNumber();
public static final int kAllocationIdFieldNumber = kAllocationIdFieldNumber();
public native @Cast("google::protobuf::int64") long allocation_id();
public native void set_allocation_id(@Cast("google::protobuf::int64") long value);
// optional bool has_single_reference = 5;
public native void clear_has_single_reference();
@MemberGetter public static native int kHasSingleReferenceFieldNumber();
public static final int kHasSingleReferenceFieldNumber = kHasSingleReferenceFieldNumber();
public native @Cast("bool") boolean has_single_reference();
public native void set_has_single_reference(@Cast("bool") boolean value);
// optional uint64 ptr = 6;
public native void clear_ptr();
@MemberGetter public static native int kPtrFieldNumber();
public static final int kPtrFieldNumber = kPtrFieldNumber();
public native @Cast("google::protobuf::uint64") long ptr();
public native void set_ptr(@Cast("google::protobuf::uint64") long value);
}
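// Usage sketch (not part of the generated bindings): report the slack between
// requested and allocated bytes recorded in an AllocationDescription, using
// only the getters declared above.
public static long exampleAllocationSlack(AllocationDescription desc) {
    long slack = desc.allocated_bytes() - desc.requested_bytes();
    System.out.println(desc.allocator_name().getString()
            + " allocation " + desc.allocation_id() + ": " + slack + " extra bytes");
    return slack;
}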
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// AllocationDescription
// optional int64 requested_bytes = 1;
// optional int64 allocated_bytes = 2;
// optional string allocator_name = 3;
// optional int64 allocation_id = 4;
// optional bool has_single_reference = 5;
// optional uint64 ptr = 6;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/allocator.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_FRAMEWORK_ALLOCATOR_H_
// #define TENSORFLOW_FRAMEWORK_ALLOCATOR_H_
// #include
// #include
// #include
// #include "tensorflow/core/framework/numeric_types.h"
// #include "tensorflow/core/framework/type_traits.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/types.h"
// Attributes for a single allocation call. Different calls to the same
// allocator could potentially have different allocation attributes.
@Namespace("tensorflow") public static class AllocationAttributes extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public AllocationAttributes() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AllocationAttributes(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AllocationAttributes(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public AllocationAttributes position(long position) {
return (AllocationAttributes)super.position(position);
}
// If the first attempt to allocate the memory fails, the allocation
// should return immediately without retrying.
// An example use case is optional scratch spaces where a failure
// has only performance impact.
public native @Cast("bool") boolean no_retry_on_failure(); public native AllocationAttributes no_retry_on_failure(boolean no_retry_on_failure);
// If a Tensor is allocated without the following set to true, then
// it is logged as an unknown allocation. During execution Tensors
// should be allocated through the OpKernelContext which records
// which Op is performing the allocation, and sets this flag to
// true.
public native @Cast("bool") boolean allocation_will_be_logged(); public native AllocationAttributes allocation_will_be_logged(boolean allocation_will_be_logged);
}
// Runtime statistics collected by an allocator.
@Namespace("tensorflow") @NoOffset public static class AllocatorStats extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AllocatorStats(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AllocatorStats(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public AllocatorStats position(long position) {
return (AllocatorStats)super.position(position);
}
public native @Cast("tensorflow::int64") long num_allocs(); public native AllocatorStats num_allocs(long num_allocs); // Number of allocations.
public native @Cast("tensorflow::int64") long bytes_in_use(); public native AllocatorStats bytes_in_use(long bytes_in_use); // Number of bytes in use.
public native @Cast("tensorflow::int64") long max_bytes_in_use(); public native AllocatorStats max_bytes_in_use(long max_bytes_in_use); // The maximum bytes in use.
public native @Cast("tensorflow::int64") long max_alloc_size(); public native AllocatorStats max_alloc_size(long max_alloc_size); // The max single allocation seen.
  // The upper limit of what the allocator can allocate, if such a limit
  // is known. Certain allocators may return 0 to indicate the limit is
  // unknown.
public native @Cast("tensorflow::int64") long bytes_limit(); public native AllocatorStats bytes_limit(long bytes_limit);
public AllocatorStats() { super((Pointer)null); allocate(); }
private native void allocate();
public native void Clear();
public native @StdString BytePointer DebugString();
}
// Allocator is an abstract interface for allocating and deallocating
// device memory.
@Namespace("tensorflow") public static class Allocator extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Allocator(Pointer p) { super(p); }
// Return a string identifying this allocator
public native @StdString BytePointer Name();
// Return an uninitialized block of memory that is "num_bytes" bytes
// in size. The returned pointer is guaranteed to be aligned to a
// multiple of "alignment" bytes.
// REQUIRES: "alignment" is a power of 2.
public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes);
// Return an uninitialized block of memory that is "num_bytes" bytes
// in size with specified allocation attributes. The returned pointer is
// guaranteed to be aligned to a multiple of "alignment" bytes.
// REQUIRES: "alignment" is a power of 2.
public native Pointer AllocateRaw(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes,
@Const @ByRef AllocationAttributes allocation_attr);
  // Deallocate a block of memory pointed to by "ptr"
// REQUIRES: "ptr" was previously returned by a call to AllocateRaw
public native void DeallocateRaw(Pointer ptr);
// Convenience functions to do typed allocation. C++ constructors
// and destructors are invoked for complex types if necessary,
// depending on the concrete Allocator implementation. May return
// NULL if the tensor has too many elements to represent in a single
// allocation.
// Returns true if this allocator tracks the sizes of allocations.
// RequestedSize and AllocatedSize must be overridden if
// TracksAllocationSizes is overridden to return true.
public native @Cast("bool") boolean TracksAllocationSizes();
// Returns true if this allocator requires tensors with 0 elements
// to allocate buffers. This is false for most allocators, but may
// be used by special-case allocators that want to track tensor
// usage.
public native @Cast("bool") boolean ShouldAllocateEmptyTensors();
// Returns the user-requested size of the data allocated at
// 'ptr'. Note that the actual buffer allocated might be larger
// than requested, but this function returns the size requested by
// the user.
//
// REQUIRES: TracksAllocationSizes() is true.
//
// REQUIRES: 'ptr!=nullptr' and points to a buffer previously
// allocated by this allocator.
public native @Cast("size_t") long RequestedSize(Pointer ptr);
// Returns the allocated size of the buffer at 'ptr' if known,
// otherwise returns RequestedSize(ptr). AllocatedSize(ptr) is
// guaranteed to be >= RequestedSize(ptr).
//
// REQUIRES: TracksAllocationSizes() is true.
//
// REQUIRES: 'ptr!=nullptr' and points to a buffer previously
// allocated by this allocator.
public native @Cast("size_t") long AllocatedSize(Pointer ptr);
// Returns either 0 or an identifier assigned to the buffer at 'ptr'
// when the buffer was returned by AllocateRaw. If non-zero, the
// identifier differs from every other ID assigned by this
// allocator.
//
// REQUIRES: TracksAllocationSizes() is true.
//
// REQUIRES: 'ptr!=nullptr' and points to a buffer previously
// allocated by this allocator.
public native @Cast("tensorflow::int64") long AllocationId(Pointer ptr);
// Returns the allocated size of the buffer at 'ptr' if known,
// otherwise returns 0. This method can be called when
// TracksAllocationSizes() is false, but can be extremely slow.
//
// REQUIRES: 'ptr!=nullptr' and points to a buffer previously
// allocated by this allocator.
public native @Cast("size_t") long AllocatedSizeSlow(Pointer ptr);
  // is_simple<T>::value if T[] can be safely constructed and destructed
  // without running T() and ~T(). We do not use std::is_trivial<T>
  // directly because std::complex<float> and std::complex<double> are
  // not trivial, but their arrays can be constructed and destructed
  // without running their default ctors and dtors.
// Fills in 'stats' with statistics collected by this allocator.
public native void GetStats(AllocatorStats stats);
}
// Allocator-specific constructors and destructors are used for
// strings
// A tensorflow Op may need access to different kinds of memory that
// are not simply a function of the device to which the Op has been
// assigned. For example, an Op executing on a GPU may still need
// to allocate CPU RAM for some purpose. Internal to the tensorflow
// runtime we may choose to allocate CPU ram from special regions
// that have been prepared for higher performance in some use
// contexts, e.g. doing DMA with particular devices. For these
// reasons, the Device interface does not expose just one memory
// Allocator, but instead provides an accessor that takes a
// specification of the desired memory attributes in order to select
// an Allocator.
//
// Example use:
// // Allocator for ordinary device memory:
// Allocator* a = allocator(AllocatorAttributes());
// ...
// // Allocator for CPU RAM, regardless of where Op is executing:
// AllocatorAttributes attr;
// attr.set_on_host(true);
// Allocator* a = allocator(attr);
@Namespace("tensorflow") public static class AllocatorAttributes extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public AllocatorAttributes() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AllocatorAttributes(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AllocatorAttributes(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public AllocatorAttributes position(long position) {
return (AllocatorAttributes)super.position(position);
}
public native void set_on_host(@Cast("bool") boolean v);
public native @Cast("bool") boolean on_host();
public native void set_nic_compatible(@Cast("bool") boolean v);
public native @Cast("bool") boolean nic_compatible();
public native void set_gpu_compatible(@Cast("bool") boolean v);
public native @Cast("bool") boolean gpu_compatible();
public native void set_track_sizes(@Cast("bool") boolean v);
public native @Cast("bool") boolean track_sizes();
public native void Merge(@ByVal AllocatorAttributes other);
// NOTE: The upper 8 bits of the value are reserved for
// device-specific uses. Implementors of a device can interpret these
// upper 8 bits in device-specific ways, and ops implemented for those
// devices are responsible for setting those 8 bits appropriately.
public native int value(); public native AllocatorAttributes value(int value);
}
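// Usage sketch (not part of the generated bindings): Java equivalent of the
// C++ example in the comment above -- request host (CPU) memory regardless of
// the device the op runs on, then fold in further requirements with Merge.
public static AllocatorAttributes exampleHostAttributes(AllocatorAttributes extra) {
    AllocatorAttributes attr = new AllocatorAttributes();
    attr.set_on_host(true);        // allocate CPU RAM even for ops placed on a GPU
    attr.set_gpu_compatible(true); // buffer may also be used for GPU transfers
    attr.Merge(extra);             // combine with attributes supplied by the caller
    return attr;
}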
// Returns a trivial implementation of Allocator which uses the system
// default malloc. The returned allocator is a process singleton.
@Namespace("tensorflow") public static native Allocator cpu_allocator();
// If 'enable' is true, the process-wide cpu allocator collects
// AllocatorStats. By default, it's disabled.
@Namespace("tensorflow") public static native void EnableCPUAllocatorStats(@Cast("bool") boolean enable);
// If 'enable' is true, the process-wide cpu allocator collects
// detailed statistics. This can be slow, so this is disabled by
// default.
@Namespace("tensorflow") public static native void EnableCPUAllocatorDetailedStats(@Cast("bool") boolean enable);
// Abstract interface of an object that does the underlying suballoc/free of
// memory for a higher-level allocator.
@Namespace("tensorflow") public static class SubAllocator extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public SubAllocator(Pointer p) { super(p); }
public native Pointer Alloc(@Cast("size_t") long alignment, @Cast("size_t") long num_bytes);
public native void Free(Pointer ptr, @Cast("size_t") long num_bytes);
}
// namespace tensorflow
// #endif // TENSORFLOW_FRAMEWORK_ALLOCATOR_H_
// Parsed from tensorflow/core/framework/tensor_shape.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/tensor_shape.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class TensorShapeProto_Dim extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShapeProto_Dim(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TensorShapeProto_Dim(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public TensorShapeProto_Dim position(long position) {
return (TensorShapeProto_Dim)super.position(position);
}
public TensorShapeProto_Dim() { super((Pointer)null); allocate(); }
private native void allocate();
public TensorShapeProto_Dim(@Const @ByRef TensorShapeProto_Dim from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef TensorShapeProto_Dim from);
public native @ByRef @Name("operator =") TensorShapeProto_Dim put(@Const @ByRef TensorShapeProto_Dim from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef TensorShapeProto_Dim default_instance();
public native void Swap(TensorShapeProto_Dim other);
// implements Message ----------------------------------------------
public native TensorShapeProto_Dim New();
public native TensorShapeProto_Dim New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef TensorShapeProto_Dim from);
public native void MergeFrom(@Const @ByRef TensorShapeProto_Dim from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional int64 size = 1;
public native void clear_size();
@MemberGetter public static native int kSizeFieldNumber();
public static final int kSizeFieldNumber = kSizeFieldNumber();
public native @Cast("google::protobuf::int64") long size();
public native void set_size(@Cast("google::protobuf::int64") long value);
// optional string name = 2;
public native void clear_name();
@MemberGetter public static native int kNameFieldNumber();
public static final int kNameFieldNumber = kNameFieldNumber();
public native @StdString BytePointer name();
public native void set_name(@StdString BytePointer value);
public native void set_name(@StdString String value);
public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
}
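// Usage sketch (added for illustration, not part of the generated bindings): sets and reads the
// two fields of a TensorShapeProto_Dim through the accessors declared above. The method name
// exampleDimUsage is hypothetical; it assumes Loader.load() has already run via the static
// initializer at the top of this file.
public static void exampleDimUsage() {
    TensorShapeProto_Dim dim = new TensorShapeProto_Dim(); // wraps a native protobuf message
    dim.set_size(128);                                      // optional int64 size = 1
    dim.set_name("batch");                                  // optional string name = 2
    System.out.println(dim.name().getString() + " = " + dim.size());
}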
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class TensorShapeProto extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShapeProto(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TensorShapeProto(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public TensorShapeProto position(long position) {
return (TensorShapeProto)super.position(position);
}
public TensorShapeProto() { super((Pointer)null); allocate(); }
private native void allocate();
public TensorShapeProto(@Const @ByRef TensorShapeProto from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef TensorShapeProto from);
public native @ByRef @Name("operator =") TensorShapeProto put(@Const @ByRef TensorShapeProto from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef TensorShapeProto default_instance();
public native void Swap(TensorShapeProto other);
// implements Message ----------------------------------------------
public native TensorShapeProto New();
public native TensorShapeProto New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef TensorShapeProto from);
public native void MergeFrom(@Const @ByRef TensorShapeProto from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
public native int dim_size();
public native void clear_dim();
@MemberGetter public static native int kDimFieldNumber();
public static final int kDimFieldNumber = kDimFieldNumber();
public native @Const @ByRef TensorShapeProto_Dim dim(int index);
public native TensorShapeProto_Dim mutable_dim(int index);
public native TensorShapeProto_Dim add_dim();
// optional bool unknown_rank = 3;
public native void clear_unknown_rank();
@MemberGetter public static native int kUnknownRankFieldNumber();
public static final int kUnknownRankFieldNumber = kUnknownRankFieldNumber();
public native @Cast("bool") boolean unknown_rank();
public native void set_unknown_rank(@Cast("bool") boolean value);
}
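// Usage sketch (illustrative only): builds a rank-2 shape proto through the repeated `dim`
// field and then walks it back. exampleShapeProtoUsage is a hypothetical helper name.
public static void exampleShapeProtoUsage() {
    TensorShapeProto shape = new TensorShapeProto();
    shape.add_dim().set_size(2);        // repeated .tensorflow.TensorShapeProto.Dim dim = 2
    shape.add_dim().set_size(3);
    shape.set_unknown_rank(false);      // optional bool unknown_rank = 3
    for (int i = 0; i < shape.dim_size(); i++) {
        System.out.println("dim " + i + ": " + shape.dim(i).size());
    }
}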
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorShapeProto_Dim
// optional int64 size = 1;
// optional string name = 2;
// -------------------------------------------------------------------
// TensorShapeProto
// repeated .tensorflow.TensorShapeProto.Dim dim = 2;
// optional bool unknown_rank = 3;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_5fshape_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/types.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/types.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2ftypes_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2ftypes_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2ftypes_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2ftypes_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2ftypes_2eproto();
/** enum tensorflow::DataType */
public static final int
DT_INVALID = 0,
DT_FLOAT = 1,
DT_DOUBLE = 2,
DT_INT32 = 3,
DT_UINT8 = 4,
DT_INT16 = 5,
DT_INT8 = 6,
DT_STRING = 7,
DT_COMPLEX64 = 8,
DT_INT64 = 9,
DT_BOOL = 10,
DT_QINT8 = 11,
DT_QUINT8 = 12,
DT_QINT32 = 13,
DT_BFLOAT16 = 14,
DT_QINT16 = 15,
DT_QUINT16 = 16,
DT_UINT16 = 17,
DT_COMPLEX128 = 18,
DT_HALF = 19,
DT_FLOAT_REF = 101,
DT_DOUBLE_REF = 102,
DT_INT32_REF = 103,
DT_UINT8_REF = 104,
DT_INT16_REF = 105,
DT_INT8_REF = 106,
DT_STRING_REF = 107,
DT_COMPLEX64_REF = 108,
DT_INT64_REF = 109,
DT_BOOL_REF = 110,
DT_QINT8_REF = 111,
DT_QUINT8_REF = 112,
DT_QINT32_REF = 113,
DT_BFLOAT16_REF = 114,
DT_QINT16_REF = 115,
DT_QUINT16_REF = 116,
DT_UINT16_REF = 117,
DT_COMPLEX128_REF = 118,
DT_HALF_REF = 119,
DataType_INT_MIN_SENTINEL_DO_NOT_USE_ = kint32min,
DataType_INT_MAX_SENTINEL_DO_NOT_USE_ = kint32max;
@Namespace("tensorflow") public static native @Cast("bool") boolean DataType_IsValid(int value);
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::DataType") int DataType_MIN();
@Namespace("tensorflow") @MemberGetter public static native @Cast("const tensorflow::DataType") int DataType_MAX();
@Namespace("tensorflow") @MemberGetter public static native int DataType_ARRAYSIZE();
@Namespace("tensorflow") public static native @Cast("const google::protobuf::EnumDescriptor*") Pointer DataType_descriptor();
@Namespace("tensorflow") public static native @StdString BytePointer DataType_Name(@Cast("tensorflow::DataType") int value);
@Namespace("tensorflow") public static native @Cast("bool") boolean DataType_Parse(
@StdString BytePointer name, @Cast("tensorflow::DataType*") IntPointer value);
@Namespace("tensorflow") public static native @Cast("bool") boolean DataType_Parse(
@StdString String name, @Cast("tensorflow::DataType*") IntPointer value);
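// Usage sketch (illustrative only): the tensorflow::DataType enum is mapped to plain int
// constants, and the protobuf enum helpers above can validate and name a value. The method
// name exampleDataTypeUsage is hypothetical.
public static void exampleDataTypeUsage() {
    int dtype = DT_FLOAT;
    if (DataType_IsValid(dtype)) {
        // DataType_Name returns the protobuf enum name, e.g. "DT_FLOAT"
        System.out.println(DataType_Name(dtype).getString());
    }
}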
// ===================================================================
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// #ifndef SWIG
// #endif // SWIG
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2ftypes_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/tensor.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/tensor.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/tensor_shape.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2ftensor_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2ftensor_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2ftensor_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class TensorProto extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorProto(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TensorProto(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public TensorProto position(long position) {
return (TensorProto)super.position(position);
}
public TensorProto() { super((Pointer)null); allocate(); }
private native void allocate();
public TensorProto(@Const @ByRef TensorProto from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef TensorProto from);
public native @ByRef @Name("operator =") TensorProto put(@Const @ByRef TensorProto from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef TensorProto default_instance();
public native void Swap(TensorProto other);
// implements Message ----------------------------------------------
public native TensorProto New();
public native TensorProto New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef TensorProto from);
public native void MergeFrom(@Const @ByRef TensorProto from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional .tensorflow.DataType dtype = 1;
public native void clear_dtype();
@MemberGetter public static native int kDtypeFieldNumber();
public static final int kDtypeFieldNumber = kDtypeFieldNumber();
public native @Cast("tensorflow::DataType") int dtype();
public native void set_dtype(@Cast("tensorflow::DataType") int value);
// optional .tensorflow.TensorShapeProto tensor_shape = 2;
public native @Cast("bool") boolean has_tensor_shape();
public native void clear_tensor_shape();
@MemberGetter public static native int kTensorShapeFieldNumber();
public static final int kTensorShapeFieldNumber = kTensorShapeFieldNumber();
public native @Const @ByRef TensorShapeProto tensor_shape();
public native TensorShapeProto mutable_tensor_shape();
public native TensorShapeProto release_tensor_shape();
public native void set_allocated_tensor_shape(TensorShapeProto tensor_shape);
// optional int32 version_number = 3;
public native void clear_version_number();
@MemberGetter public static native int kVersionNumberFieldNumber();
public static final int kVersionNumberFieldNumber = kVersionNumberFieldNumber();
public native @Cast("google::protobuf::int32") int version_number();
public native void set_version_number(@Cast("google::protobuf::int32") int value);
// optional bytes tensor_content = 4;
public native void clear_tensor_content();
@MemberGetter public static native int kTensorContentFieldNumber();
public static final int kTensorContentFieldNumber = kTensorContentFieldNumber();
public native @StdString BytePointer tensor_content();
public native void set_tensor_content(@StdString BytePointer value);
public native void set_tensor_content(@StdString String value);
public native void set_tensor_content(@Const Pointer value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_tensor_content();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_tensor_content();
public native void set_allocated_tensor_content(@StdString @Cast({"char*", "std::string*"}) BytePointer tensor_content);
// repeated int32 half_val = 13 [packed = true];
public native int half_val_size();
public native void clear_half_val();
@MemberGetter public static native int kHalfValFieldNumber();
public static final int kHalfValFieldNumber = kHalfValFieldNumber();
public native @Cast("google::protobuf::int32") int half_val(int index);
public native void set_half_val(int index, @Cast("google::protobuf::int32") int value);
public native void add_half_val(@Cast("google::protobuf::int32") int value);
// repeated float float_val = 5 [packed = true];
public native int float_val_size();
public native void clear_float_val();
@MemberGetter public static native int kFloatValFieldNumber();
public static final int kFloatValFieldNumber = kFloatValFieldNumber();
public native float float_val(int index);
public native void set_float_val(int index, float value);
public native void add_float_val(float value);
// repeated double double_val = 6 [packed = true];
public native int double_val_size();
public native void clear_double_val();
@MemberGetter public static native int kDoubleValFieldNumber();
public static final int kDoubleValFieldNumber = kDoubleValFieldNumber();
public native double double_val(int index);
public native void set_double_val(int index, double value);
public native void add_double_val(double value);
// repeated int32 int_val = 7 [packed = true];
public native int int_val_size();
public native void clear_int_val();
@MemberGetter public static native int kIntValFieldNumber();
public static final int kIntValFieldNumber = kIntValFieldNumber();
public native @Cast("google::protobuf::int32") int int_val(int index);
public native void set_int_val(int index, @Cast("google::protobuf::int32") int value);
public native void add_int_val(@Cast("google::protobuf::int32") int value);
// repeated bytes string_val = 8;
public native int string_val_size();
public native void clear_string_val();
@MemberGetter public static native int kStringValFieldNumber();
public static final int kStringValFieldNumber = kStringValFieldNumber();
public native @StdString BytePointer string_val(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_string_val(int index);
public native void set_string_val(int index, @StdString BytePointer value);
public native void set_string_val(int index, @StdString String value);
public native void set_string_val(int index, @Const Pointer value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_string_val();
public native void add_string_val(@StdString BytePointer value);
public native void add_string_val(@StdString String value);
public native void add_string_val(@Const Pointer value, @Cast("size_t") long size);
// repeated float scomplex_val = 9 [packed = true];
public native int scomplex_val_size();
public native void clear_scomplex_val();
@MemberGetter public static native int kScomplexValFieldNumber();
public static final int kScomplexValFieldNumber = kScomplexValFieldNumber();
public native float scomplex_val(int index);
public native void set_scomplex_val(int index, float value);
public native void add_scomplex_val(float value);
// repeated int64 int64_val = 10 [packed = true];
public native int int64_val_size();
public native void clear_int64_val();
@MemberGetter public static native int kInt64ValFieldNumber();
public static final int kInt64ValFieldNumber = kInt64ValFieldNumber();
public native @Cast("google::protobuf::int64") long int64_val(int index);
public native void set_int64_val(int index, @Cast("google::protobuf::int64") long value);
public native void add_int64_val(@Cast("google::protobuf::int64") long value);
// repeated bool bool_val = 11 [packed = true];
public native int bool_val_size();
public native void clear_bool_val();
@MemberGetter public static native int kBoolValFieldNumber();
public static final int kBoolValFieldNumber = kBoolValFieldNumber();
public native @Cast("bool") boolean bool_val(int index);
public native void set_bool_val(int index, @Cast("bool") boolean value);
public native void add_bool_val(@Cast("bool") boolean value);
// repeated double dcomplex_val = 12 [packed = true];
public native int dcomplex_val_size();
public native void clear_dcomplex_val();
@MemberGetter public static native int kDcomplexValFieldNumber();
public static final int kDcomplexValFieldNumber = kDcomplexValFieldNumber();
public native double dcomplex_val(int index);
public native void set_dcomplex_val(int index, double value);
public native void add_dcomplex_val(double value);
}
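// Usage sketch (illustrative only): assembles a small float TensorProto by hand with the
// accessors above. exampleTensorProtoUsage is a hypothetical helper name.
public static void exampleTensorProtoUsage() {
    TensorProto proto = new TensorProto();
    proto.set_dtype(DT_FLOAT);                    // optional .tensorflow.DataType dtype = 1
    TensorShapeProto shape = proto.mutable_tensor_shape();
    shape.add_dim().set_size(2);                  // 2 x 2 matrix
    shape.add_dim().set_size(2);
    for (float v : new float[] {1f, 2f, 3f, 4f}) {
        proto.add_float_val(v);                   // repeated float float_val = 5
    }
    System.out.println("serialized size: " + proto.ByteSize() + " bytes");
}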
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorProto
// optional .tensorflow.DataType dtype = 1;
// optional .tensorflow.TensorShapeProto tensor_shape = 2;
// optional int32 version_number = 3;
// optional bytes tensor_content = 4;
// repeated int32 half_val = 13 [packed = true];
// repeated float float_val = 5 [packed = true];
// repeated double double_val = 6 [packed = true];
// repeated int32 int_val = 7 [packed = true];
// repeated bytes string_val = 8;
// repeated float scomplex_val = 9 [packed = true];
// repeated int64 int64_val = 10 [packed = true];
// repeated bool bool_val = 11 [packed = true];
// repeated double dcomplex_val = 12 [packed = true];
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/tensor_description.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/tensor_description.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/framework/tensor_shape.pb.h"
// #include "tensorflow/core/framework/allocation_description.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class TensorDescription extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorDescription(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TensorDescription(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public TensorDescription position(long position) {
return (TensorDescription)super.position(position);
}
public TensorDescription() { super((Pointer)null); allocate(); }
private native void allocate();
public TensorDescription(@Const @ByRef TensorDescription from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef TensorDescription from);
public native @ByRef @Name("operator =") TensorDescription put(@Const @ByRef TensorDescription from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef TensorDescription default_instance();
public native void Swap(TensorDescription other);
// implements Message ----------------------------------------------
public native TensorDescription New();
public native TensorDescription New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef TensorDescription from);
public native void MergeFrom(@Const @ByRef TensorDescription from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional .tensorflow.DataType dtype = 1;
public native void clear_dtype();
@MemberGetter public static native int kDtypeFieldNumber();
public static final int kDtypeFieldNumber = kDtypeFieldNumber();
public native @Cast("tensorflow::DataType") int dtype();
public native void set_dtype(@Cast("tensorflow::DataType") int value);
// optional .tensorflow.TensorShapeProto shape = 2;
public native @Cast("bool") boolean has_shape();
public native void clear_shape();
@MemberGetter public static native int kShapeFieldNumber();
public static final int kShapeFieldNumber = kShapeFieldNumber();
public native @Const @ByRef TensorShapeProto shape();
public native TensorShapeProto mutable_shape();
public native TensorShapeProto release_shape();
public native void set_allocated_shape(TensorShapeProto shape);
// optional .tensorflow.AllocationDescription allocation_description = 4;
public native @Cast("bool") boolean has_allocation_description();
public native void clear_allocation_description();
@MemberGetter public static native int kAllocationDescriptionFieldNumber();
public static final int kAllocationDescriptionFieldNumber = kAllocationDescriptionFieldNumber();
public native @Const @ByRef AllocationDescription allocation_description();
public native AllocationDescription mutable_allocation_description();
public native AllocationDescription release_allocation_description();
public native void set_allocated_allocation_description(AllocationDescription allocation_description);
}
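// Usage sketch (illustrative only): a TensorDescription is normally filled in by
// Tensor.FillDescription(), declared further below; building one by hand just exercises the
// same accessors. exampleTensorDescriptionUsage is a hypothetical helper name.
public static void exampleTensorDescriptionUsage() {
    TensorDescription desc = new TensorDescription();
    desc.set_dtype(DT_INT32);                      // optional .tensorflow.DataType dtype = 1
    desc.mutable_shape().add_dim().set_size(10);   // optional .tensorflow.TensorShapeProto shape = 2
    System.out.println("has shape: " + desc.has_shape());
}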
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// TensorDescription
// optional .tensorflow.DataType dtype = 1;
// optional .tensorflow.TensorShapeProto shape = 2;
// optional .tensorflow.AllocationDescription allocation_description = 4;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/tensor_types.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_FRAMEWORK_TENSOR_TYPES_H_
// #define TENSORFLOW_FRAMEWORK_TENSOR_TYPES_H_
// #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// Helper to define Tensor types given that the scalar is of type T.
// namespace tensorflow
// #endif // TENSORFLOW_FRAMEWORK_TENSOR_TYPES_H_
// Parsed from tensorflow/core/framework/tensor_shape.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_SHAPE_H_
// #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_SHAPE_H_
// #include
// #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// #include "tensorflow/core/framework/tensor_shape.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/lib/core/errors.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/gtl/array_slice.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/lib/strings/strcat.h"
// #include "tensorflow/core/platform/logging.h" // Declared below
@Namespace("tensorflow") @NoOffset public static class TensorShape extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShape(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TensorShape(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public TensorShape position(long position) {
return (TensorShape)super.position(position);
}
/** \brief Construct a {@code TensorShape} from the provided sizes.
* REQUIRES: {@code dim_sizes[i] >= 0} */
public TensorShape(@Cast("tensorflow::int64*") @ArraySlice LongPointer dim_sizes) { super((Pointer)null); allocate(dim_sizes); }
private native void allocate(@Cast("tensorflow::int64*") @ArraySlice LongPointer dim_sizes);
public TensorShape(@Cast("tensorflow::int64*") @ArraySlice LongBuffer dim_sizes) { super((Pointer)null); allocate(dim_sizes); }
private native void allocate(@Cast("tensorflow::int64*") @ArraySlice LongBuffer dim_sizes);
public TensorShape(@Cast("tensorflow::int64*") @ArraySlice long... dim_sizes) { super((Pointer)null); allocate(dim_sizes); }
private native void allocate(@Cast("tensorflow::int64*") @ArraySlice long... dim_sizes);
/** REQUIRES: {@code IsValid(proto)} */
public TensorShape(@Const @ByRef TensorShapeProto proto) { super((Pointer)null); allocate(proto); }
private native void allocate(@Const @ByRef TensorShapeProto proto);
/** Create a tensor shape with no dimensions and one element, which you can
* then call {@code AddDim()} on. */
public TensorShape() { super((Pointer)null); allocate(); }
private native void allocate();
/** Copy the specified shape */
public TensorShape(@Const @ByRef TensorShape b) { super((Pointer)null); allocate(b); }
private native void allocate(@Const @ByRef TensorShape b);
public native @Name("operator =") void put(@Const @ByRef TensorShape b);
/** Returns {@code true} iff {@code proto} is a valid tensor shape. */
public static native @Cast("bool") boolean IsValid(@Const @ByRef TensorShapeProto proto);
/** Returns {@code OK} iff {@code proto} is a valid tensor shape, and a descriptive error
* status otherwise. */
public static native @ByVal Status IsValidShape(@Const @ByRef TensorShapeProto proto);
/** Clear a tensor shape */
public native void Clear();
/** \brief Add a dimension to the end ("inner-most").
* REQUIRES: {@code size >= 0} */
public native void AddDim(@Cast("tensorflow::int64") long size);
/** Appends all the dimensions from {@code shape}. */
public native void AppendShape(@Const @ByRef TensorShape shape);
// Maximum number of dimensions in a tensor.
public static native int MaxDimensions();
/** \brief Insert a dimension somewhere in the {@code TensorShape}.
* REQUIRES: {@code 0 <= d <= dims()}
* REQUIRES: {@code size >= 0} */
public native void InsertDim(int d, @Cast("tensorflow::int64") long size);
/** \brief Modifies the size of the dimension {@code d} to be {@code size}
* REQUIRES: {@code 0 <= d < dims()}
* REQUIRES: {@code size >= 0} */
public native void set_dim(int d, @Cast("tensorflow::int64") long size);
/** \brief Removes dimension {@code d} from the {@code TensorShape}.
* REQUIRES: {@code 0 <= d < dims()} */
public native void RemoveDim(int d);
/** Return the number of dimensions in the tensor. */
public native int dims();
/** \brief Returns the number of elements in dimension {@code d}.
* REQUIRES: {@code 0 <= d < dims()} */
// TODO(touts): Rename to `dimension()` to match
// `Eigen::Tensor::dimension()`?
public native @Cast("tensorflow::int64") long dim_size(int d);
/** Returns sizes of all dimensions. */
///
public native @ByVal LongVector dim_sizes();
/** \brief Returns the number of elements in the tensor.
*
* We use {@code int64} and not {@code size_t} to be compatible with {@code Eigen::Tensor}
* which uses {@code ptrdiff_t}. */
public native @Cast("tensorflow::int64") long num_elements();
/** Returns true if {@code *this} and {@code b} have the same sizes. Ignores
* dimension names. */
public native @Cast("bool") boolean IsSameSize(@Const @ByRef TensorShape b);
public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorShape b);
public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorShape b);
/** Fill {@code *proto} from {@code *this}. */
public native void AsProto(TensorShapeProto proto);
/** Fill {@code *dsizes} from {@code *this}. */
/** Same as {@code AsEigenDSizes()} but allows for {@code NDIMS > dims()} -- in
* which case we pad the rest of the sizes with 1. */
/** For iterating through the dimensions. */
public native @ByVal TensorShapeIter begin();
public native @ByVal TensorShapeIter end();
/** For error messages. */
public native @StdString BytePointer DebugString();
/** Same as {@code TensorShape(proto).DebugString()} but doesn't crash for
* invalid protos. */
public static native @StdString BytePointer DebugString(@Const @ByRef TensorShapeProto proto);
public native void DumpRep();
}
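// Usage sketch (illustrative only): constructs a TensorShape from Java longs, grows it with
// AddDim(), and fills a TensorShapeProto from it. exampleTensorShapeUsage is a hypothetical
// helper name.
public static void exampleTensorShapeUsage() {
    TensorShape shape = new TensorShape(2, 3);     // varargs long... constructor above
    shape.AddDim(4);                               // now 2 x 3 x 4
    System.out.println(shape.dims() + " dims, "    // 3 dims
            + shape.num_elements() + " elements: " // 24 elements
            + shape.DebugString().getString());
    TensorShapeProto proto = new TensorShapeProto();
    shape.AsProto(proto);                          // fill the proto from the shape
}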
@Namespace("tensorflow") @NoOffset public static class TensorShapeDim extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShapeDim(Pointer p) { super(p); }
public TensorShapeDim(@Cast("tensorflow::int64") long s) { super((Pointer)null); allocate(s); }
private native void allocate(@Cast("tensorflow::int64") long s);
public native @Cast("tensorflow::int64") long size(); public native TensorShapeDim size(long size);
}
@Namespace("tensorflow") @NoOffset public static class TensorShapeIter extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShapeIter(Pointer p) { super(p); }
public TensorShapeIter(@Const TensorShape shape, int d) { super((Pointer)null); allocate(shape, d); }
private native void allocate(@Const TensorShape shape, int d);
public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorShapeIter rhs);
public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorShapeIter rhs);
public native @Name("operator ++") void increment();
public native @ByVal @Name("operator *") TensorShapeDim multiply();
}
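// Usage sketch (illustrative only): TensorShape.begin()/end() (declared above) return
// TensorShapeIter values that can be stepped manually from Java; operator* is mapped to
// multiply() by the generator. exampleShapeIterUsage is a hypothetical helper name.
public static void exampleShapeIterUsage() {
    TensorShape shape = new TensorShape(2, 3, 4);
    TensorShapeIter end = shape.end();
    for (TensorShapeIter it = shape.begin(); it.notEquals(end); it.increment()) {
        System.out.println("dim size: " + it.multiply().size()); // (*it).size
    }
}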
/** \brief Static helper routines for {@code TensorShape}. Includes a few common
* predicates on a tensor shape. */
@Namespace("tensorflow") public static class TensorShapeUtils extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public TensorShapeUtils() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TensorShapeUtils(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorShapeUtils(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public TensorShapeUtils position(long position) {
return (TensorShapeUtils)super.position(position);
}
public static native @Cast("bool") boolean IsScalar(@Const @ByRef TensorShape shape);
public static native @Cast("bool") boolean IsVector(@Const @ByRef TensorShape shape);
public static native @Cast("bool") boolean IsVectorOrHigher(@Const @ByRef TensorShape shape);
public static native @Cast("bool") boolean IsMatrix(@Const @ByRef TensorShape shape);
public static native @Cast("bool") boolean IsMatrixOrHigher(@Const @ByRef TensorShape shape);
/** \brief Returns a {@code TensorShape} whose dimensions are
* {@code dims[0]}, {@code dims[1]}, ..., {@code dims[n-1]}. */
public static native @ByVal Status MakeShape(@Const IntPointer dims, int n, TensorShape out);
public static native @ByVal Status MakeShape(@Const IntBuffer dims, int n, TensorShape out);
public static native @ByVal Status MakeShape(@Const int[] dims, int n, TensorShape out);
public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") LongPointer dims, int n, TensorShape out);
public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") LongBuffer dims, int n, TensorShape out);
public static native @ByVal Status MakeShape(@Cast("const tensorflow::int64*") long[] dims, int n, TensorShape out);
public static native @StdString BytePointer ShapeListString(@Const @ByRef TensorShapeVector shapes);
public static native @Cast("bool") boolean StartsWith(@Const @ByRef TensorShape shape0, @Const @ByRef TensorShape shape1);
}
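// Usage sketch (illustrative only): the static predicates and MakeShape() helper above are
// called directly on the nested class. exampleShapeUtilsUsage is a hypothetical helper name;
// the ok() accessor is assumed to be declared on the Status class elsewhere in these bindings.
public static void exampleShapeUtilsUsage() {
    TensorShape matrix = new TensorShape(4, 5);
    System.out.println("is matrix: " + TensorShapeUtils.IsMatrix(matrix));
    TensorShape out = new TensorShape();
    Status s = TensorShapeUtils.MakeShape(new long[] {6, 7, 8}, 3, out);
    System.out.println("MakeShape ok: " + s.ok() + ", dims: " + out.dims());
}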
// ----------------------------------------------------------------------------
// Template method implementation details below
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// Inlining of some performance critical routines
// ----------------------------------------------------------------------------
// namespace tensorflow
// #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_SHAPE_H_
// Parsed from tensorflow/core/framework/tensor.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_
// #define TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_
// #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
// #include "tensorflow/core/framework/allocation_description.pb.h"
// #include "tensorflow/core/framework/allocator.h"
// #include "tensorflow/core/framework/tensor.pb.h"
// #include "tensorflow/core/framework/tensor_description.pb.h"
// #include "tensorflow/core/framework/tensor_shape.h"
// #include "tensorflow/core/framework/tensor_types.h"
// #include "tensorflow/core/framework/types.h"
// #include "tensorflow/core/framework/types.pb.h"
// #include "tensorflow/core/lib/core/refcount.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// #include "tensorflow/core/lib/gtl/inlined_vector.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/types.h" // Forward declaration.
@Namespace("tensorflow") @Opaque public static class TensorCApi extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public TensorCApi() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorCApi(Pointer p) { super(p); }
}
/** Represents an n-dimensional array of values. */
@Namespace("tensorflow") @NoOffset public static class Tensor extends AbstractTensor {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Tensor(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public Tensor(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public Tensor position(long position) {
return (Tensor)super.position(position);
}
/** Default Tensor constructor. Creates a 1-dimension, 0-element float tensor. */
///
public Tensor() { super((Pointer)null); allocate(); }
private native void allocate();
/** \brief Creates a Tensor of the given {@code type} and {@code shape}. If
* LogMemory::IsEnabled() the allocation is logged as coming from
* an unknown kernel and step. Calling the Tensor constructor
* directly from within an Op is deprecated: use the
* OpKernelConstruction/OpKernelContext allocate_* methods to
* allocate a new tensor, which record the kernel and step.
*
* The underlying buffer is allocated using a {@code CPUAllocator}. */
///
public Tensor(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape) { super((Pointer)null); allocate(type, shape); }
private native void allocate(@Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape);
/** \brief Creates a tensor with the input {@code type} and {@code shape}, using
* the allocator {@code a} to allocate the underlying buffer. If
* LogMemory::IsEnabled() the allocation is logged as coming from
* an unknown kernel and step. Calling the Tensor constructor
* directly from within an Op is deprecated: use the
* OpKernelConstruction/OpKernelContext allocate_* methods to
* allocate a new tensor, which record the kernel and step.
*
* {@code a} must outlive the lifetime of this Tensor. */
///
public Tensor(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape) { super((Pointer)null); allocate(a, type, shape); }
private native void allocate(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape);
/** \brief Creates a tensor with the input {@code type} and {@code shape}, using
* the allocator {@code a} and the specified "allocation_attr" to
* allocate the underlying buffer. If the kernel and step are known
* allocation_attr.allocation_will_be_logged should be set to true
* and LogMemory::RecordTensorAllocation should be called after the
* tensor is constructed. Calling the Tensor constructor directly
* from within an Op is deprecated: use the
* OpKernelConstruction/OpKernelContext allocate_* methods to
* allocate a new tensor, which record the kernel and step.
*
* {@code a} must outlive the lifetime of this Tensor. */
public Tensor(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
@Const @ByRef AllocationAttributes allocation_attr) { super((Pointer)null); allocate(a, type, shape, allocation_attr); }
private native void allocate(Allocator a, @Cast("tensorflow::DataType") int type, @Const @ByRef TensorShape shape,
@Const @ByRef AllocationAttributes allocation_attr);
/** Creates an uninitialized Tensor of the given data type. */
public Tensor(@Cast("tensorflow::DataType") int type) { super((Pointer)null); allocate(type); }
private native void allocate(@Cast("tensorflow::DataType") int type);
public Tensor(@Const @ByRef Tensor other) { super((Pointer)null); allocate(other); }
private native void allocate(@Const @ByRef Tensor other); /** Copy constructor. */
/** Returns the data type. */
public native @Cast("tensorflow::DataType") int dtype();
/** Returns the shape of the tensor. */
///
public native @Const @ByRef TensorShape shape();
/** \brief Convenience accessor for the tensor shape.
*
* For all shape accessors, see comments for relevant methods of
* {@code TensorShape} in {@code tensor_shape.h}. */
public native int dims();
/** Convenience accessor for the tensor shape. */
public native @Cast("tensorflow::int64") long dim_size(int d);
/** Convenience accessor for the tensor shape. */
public native @Cast("tensorflow::int64") long NumElements();
public native @Cast("bool") boolean IsSameSize(@Const @ByRef Tensor b);
// True iff the two tensors use the same underlying refcounted storage
public native @Cast("bool") boolean SharesBufferWith(@Const @ByRef Tensor b);
// The BufferHash values of two tensors are equal when they share the same
// underlying refcounted storage
public native @Cast("size_t") long BufferHash();
/** Has this Tensor been initialized? */
public native @Cast("bool") boolean IsInitialized();
/** Returns the estimated memory usage of this tensor. */
public native @Cast("size_t") long TotalBytes();
/** Returns true iff this tensor is aligned. */
public native @Cast("bool") boolean IsAligned();
/** Assign operator. This tensor shares other's underlying storage. */
///
public native @ByRef @Name("operator =") Tensor put(@Const @ByRef Tensor other);
/** \brief Copy the other tensor into this tensor and reshape it.
*
* This tensor shares other's underlying storage. Returns {@code true}
* iff {@code other.shape()} has the same number of elements of the given
* {@code shape}. */
///
///
public native @Cast("bool") boolean CopyFrom(@Const @ByRef Tensor other,
@Const @ByRef TensorShape shape);
/** \brief Slice this tensor along the 1st dimension.
* I.e., the returned tensor satisfies
* returned[i, ...] == this[dim0_start + i, ...].
* The returned tensor shares the underlying tensor buffer with this
* tensor.
*
* NOTE: The returned tensor may not satisfy the same alignment
* requirement as this tensor depending on the shape. The caller
* must check the returned tensor's alignment before calling certain
* methods that have alignment requirement (e.g., {@code flat()}, {@code tensor()}).
*
* REQUIRES: {@code dims()} >= 1
* REQUIRES: {@code 0 <= dim0_start <= dim0_limit <= dim_size(0)} */
public native @ByVal Tensor Slice(@Cast("tensorflow::int64") long dim0_start, @Cast("tensorflow::int64") long dim0_limit);
/** \brief Parse {@code other} and construct the tensor.
* Returns {@code true} iff the parsing succeeds. If the parsing fails,
* the state of {@code *this} is unchanged. */
public native @Cast("bool") boolean FromProto(@Const @ByRef TensorProto other);
///
public native @Cast("bool") boolean FromProto(Allocator a, @Const @ByRef TensorProto other);
/** \brief Fills in {@code proto} with {@code *this} tensor's content.
*
* {@code AsProtoField()} fills in the repeated field for {@code proto.dtype()}, while
* {@code AsProtoTensorContent()} encodes the content in {@code proto.tensor_content()}
* in a compact form. */
public native void AsProtoField(TensorProto proto);
///
///
///
///
///
public native void AsProtoTensorContent(TensorProto proto);
/** \brief Return the tensor data as an {@code Eigen::Tensor} with the type and
* sizes of this {@code Tensor}.
*
* Use these methods when you know the data type and the number of
* dimensions of the Tensor and you want an {@code Eigen::Tensor}
* automatically sized to the {@code Tensor} sizes. The implementation check
* fails if either type or sizes mismatch.
*
* Example:
*
*
 * {@code c++
 *
 * typedef float T;
 * Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
 * auto mat = my_mat.matrix<T>();    // 2D Eigen::Tensor, 3 x 5.
 * auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
 * auto vec = my_mat.vec<T>();       // CHECK fails as my_mat is 2D.
 * auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
 * auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
*
* }
*/
/** \brief Return the tensor data as an {@code Eigen::Tensor} of the data type and a
* specified shape.
*
* These methods allow you to access the data with the dimensions
* and sizes of your choice. You do not need to know the number of
* dimensions of the Tensor to call them. However, they {@code CHECK} that
 * the type matches and that the requested dimensions create an
* {@code Eigen::Tensor} with the same number of elements as the tensor.
*
* Example:
*
* {@code c++
*
* typedef float T;
* Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
* // 1D Eigen::Tensor, size 60:
 * auto flat = my_ten.flat<T>();
 * // 2D Eigen::Tensor 12 x 5:
 * auto inner = my_ten.flat_inner_dims<T>();
 * // 2D Eigen::Tensor 4 x 15:
 * auto outer = my_ten.shaped<T, 2>({4, 15});
 * // CHECK fails, bad num elements:
 * auto outer = my_ten.shaped<T, 2>({4, 8});
 * // 3D Eigen::Tensor 6 x 5 x 2:
 * auto weird = my_ten.shaped<T, 3>({6, 5, 2});
 * // CHECK fails, type mismatch:
 * auto bad = my_ten.flat<int32>();
*
* }
*/
/** Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
* Tensor dimensions but the last NDIMS-1 into the first dimension of the
* result. If NDIMS > dims() then leading dimensions of size 1 will be
* added to make the output rank NDIMS. */
/** Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
* Tensor dimensions but the first NDIMS-1 into the last dimension of the
* result. If NDIMS > dims() then trailing dimensions of size 1 will be
* added to make the output rank NDIMS. */
/** \brief Return the Tensor data as a {@code TensorMap} of fixed size 1:
 * {@code TensorMap<TensorFixedSize<T, 1>>}.
* Using {@code scalar()} allows the compiler to perform optimizations as
* the size of the tensor is known at compile time. */
/** Const versions of all the methods above. */
/** Render the first {@code max_entries} values in {@code *this} into a string. */
public native @StdString BytePointer SummarizeValue(@Cast("tensorflow::int64") long max_entries);
/** A human-readable summary of the tensor suitable for debugging. */
public native @StdString BytePointer DebugString();
/** Fill in the {@code TensorDescription} proto with metadata about the
* tensor that is useful for monitoring and debugging. */
///
///
///
public native void FillDescription(TensorDescription description);
/** \brief Returns a {@code StringPiece} mapping the current tensor's buffer.
*
 * The returned {@code StringPiece} may point to a memory location on devices
* that the CPU cannot address directly.
*
* NOTE: The underlying tensor buffer is refcounted, so the lifetime
* of the contents mapped by the {@code StringPiece} matches the lifetime of
* the buffer; callers should arrange to make sure the buffer does
* not get destroyed while the {@code StringPiece} is still used.
*
* REQUIRES: {@code DataTypeCanUseMemcpy(dtype())}. */
///
public native @StringPiece BytePointer tensor_data();
/** Copy the other tensor into this tensor and reshape it and reinterpret the
* buffer's datatype.
*
* This tensor shares other's underlying storage. */
public native void UnsafeCopyFromInternal(@Const @ByRef Tensor arg0, @Const @ByRef TensorShape arg1);
}
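// Usage sketch (illustrative only): allocates a float tensor with the default CPU allocator,
// inspects its metadata, and fills a TensorProto from it. exampleTensorUsage is a hypothetical
// helper name; reading or writing the actual values goes through the Eigen accessors documented
// above or the AbstractTensor helpers this class extends, which are not shown here.
public static void exampleTensorUsage() {
    Tensor t = new Tensor(DT_FLOAT, new TensorShape(2, 3));
    System.out.println("dtype=" + t.dtype()
            + " dims=" + t.dims()
            + " elements=" + t.NumElements()
            + " bytes=" + t.TotalBytes());
    System.out.println(t.DebugString().getString());
    TensorProto proto = new TensorProto();
    t.AsProtoField(proto);   // fills the repeated value field (float_val for DT_FLOAT)
}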
// Implementation details
// Interface to access the raw ref-counted data buffer.
@Namespace("tensorflow") public static class TensorBuffer extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TensorBuffer(Pointer p) { super(p); }
// data() points to a memory region of size() bytes.
public native Pointer data();
public native @Cast("size_t") long size();
// If this TensorBuffer is sub-buffer of another TensorBuffer,
// returns that TensorBuffer. Otherwise, returns this.
public native TensorBuffer root_buffer();
// Fill metadata about the allocation into the proto.
public native void FillAllocationDescription(
AllocationDescription proto);
}
// namespace tensorflow
// #endif // TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_
// Parsed from tensorflow/core/framework/attr_value.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/attr_value.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/tensor.pb.h"
// #include "tensorflow/core/framework/tensor_shape.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class AttrValue_ListValue extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AttrValue_ListValue(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AttrValue_ListValue(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public AttrValue_ListValue position(long position) {
return (AttrValue_ListValue)super.position(position);
}
public AttrValue_ListValue() { super((Pointer)null); allocate(); }
private native void allocate();
public AttrValue_ListValue(@Const @ByRef AttrValue_ListValue from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef AttrValue_ListValue from);
public native @ByRef @Name("operator =") AttrValue_ListValue put(@Const @ByRef AttrValue_ListValue from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef AttrValue_ListValue default_instance();
public native void Swap(AttrValue_ListValue other);
// implements Message ----------------------------------------------
public native AttrValue_ListValue New();
public native AttrValue_ListValue New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef AttrValue_ListValue from);
public native void MergeFrom(@Const @ByRef AttrValue_ListValue from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated bytes s = 2;
public native int s_size();
public native void clear_s();
@MemberGetter public static native int kSFieldNumber();
public static final int kSFieldNumber = kSFieldNumber();
public native @StdString BytePointer s(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_s(int index);
public native void set_s(int index, @StdString BytePointer value);
public native void set_s(int index, @StdString String value);
public native void set_s(int index, @Const Pointer value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_s();
public native void add_s(@StdString BytePointer value);
public native void add_s(@StdString String value);
public native void add_s(@Const Pointer value, @Cast("size_t") long size);
// repeated int64 i = 3 [packed = true];
public native int i_size();
public native void clear_i();
@MemberGetter public static native int kIFieldNumber();
public static final int kIFieldNumber = kIFieldNumber();
public native @Cast("google::protobuf::int64") long i(int index);
public native void set_i(int index, @Cast("google::protobuf::int64") long value);
public native void add_i(@Cast("google::protobuf::int64") long value);
// repeated float f = 4 [packed = true];
public native int f_size();
public native void clear_f();
@MemberGetter public static native int kFFieldNumber();
public static final int kFFieldNumber = kFFieldNumber();
public native float f(int index);
public native void set_f(int index, float value);
public native void add_f(float value);
// repeated bool b = 5 [packed = true];
public native int b_size();
public native void clear_b();
@MemberGetter public static native int kBFieldNumber();
public static final int kBFieldNumber = kBFieldNumber();
public native @Cast("bool") boolean b(int index);
public native void set_b(int index, @Cast("bool") boolean value);
public native void add_b(@Cast("bool") boolean value);
// repeated .tensorflow.DataType type = 6 [packed = true];
public native int type_size();
public native void clear_type();
@MemberGetter public static native int kTypeFieldNumber();
public static final int kTypeFieldNumber = kTypeFieldNumber();
public native @Cast("tensorflow::DataType") int type(int index);
public native void set_type(int index, @Cast("tensorflow::DataType") int value);
public native void add_type(@Cast("tensorflow::DataType") int value);
// repeated .tensorflow.TensorShapeProto shape = 7;
public native int shape_size();
public native void clear_shape();
@MemberGetter public static native int kShapeFieldNumber();
public static final int kShapeFieldNumber = kShapeFieldNumber();
public native @Const @ByRef TensorShapeProto shape(int index);
public native TensorShapeProto mutable_shape(int index);
public native TensorShapeProto add_shape();
// repeated .tensorflow.TensorProto tensor = 8;
public native int tensor_size();
public native void clear_tensor();
@MemberGetter public static native int kTensorFieldNumber();
public static final int kTensorFieldNumber = kTensorFieldNumber();
public native @Const @ByRef TensorProto tensor(int index);
public native TensorProto mutable_tensor(int index);
public native TensorProto add_tensor();
}
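// Example (editor's usage sketch, not part of the generated bindings): the repeated-field
// accessors declared above follow the protobuf C++ pattern of add_*, *_size() and indexed
// getters. The helper name exampleListValue is hypothetical.
public static AttrValue_ListValue exampleListValue() {
    AttrValue_ListValue list = new AttrValue_ListValue();
    list.add_s("conv1");        // repeated bytes s = 2
    list.add_i(64);             // repeated int64 i = 3
    list.add_i(128);
    list.add_f(0.5f);           // repeated float f = 4
    // i_size() now reports 2 and i(0) returns 64.
    return list;
}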
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class AttrValue extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AttrValue(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AttrValue(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public AttrValue position(long position) {
return (AttrValue)super.position(position);
}
public AttrValue() { super((Pointer)null); allocate(); }
private native void allocate();
public AttrValue(@Const @ByRef AttrValue from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef AttrValue from);
public native @ByRef @Name("operator =") AttrValue put(@Const @ByRef AttrValue from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef AttrValue default_instance();
/** enum tensorflow::AttrValue::ValueCase */
public static final int
kS = 2,
kI = 3,
kF = 4,
kB = 5,
kType = 6,
kShape = 7,
kTensor = 8,
kList = 1,
kFunc = 10,
kPlaceholder = 9,
VALUE_NOT_SET = 0;
public native void Swap(AttrValue other);
// implements Message ----------------------------------------------
public native AttrValue New();
public native AttrValue New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef AttrValue from);
public native void MergeFrom(@Const @ByRef AttrValue from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
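// accessors -------------------------------------------------------
// optional bytes s = 2;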
public native void clear_s();
@MemberGetter public static native int kSFieldNumber();
public static final int kSFieldNumber = kSFieldNumber();
public native @StdString BytePointer s();
public native void set_s(@StdString BytePointer value);
public native void set_s(@StdString String value);
public native void set_s(@Const Pointer value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_s();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_s();
public native void set_allocated_s(@StdString @Cast({"char*", "std::string*"}) BytePointer s);
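// optional int64 i = 3;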
public native void clear_i();
@MemberGetter public static native int kIFieldNumber();
public static final int kIFieldNumber = kIFieldNumber();
public native @Cast("google::protobuf::int64") long i();
public native void set_i(@Cast("google::protobuf::int64") long value);
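// optional float f = 4;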
public native void clear_f();
@MemberGetter public static native int kFFieldNumber();
public static final int kFFieldNumber = kFFieldNumber();
public native float f();
public native void set_f(float value);
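// optional bool b = 5;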
public native void clear_b();
@MemberGetter public static native int kBFieldNumber();
public static final int kBFieldNumber = kBFieldNumber();
public native @Cast("bool") boolean b();
public native void set_b(@Cast("bool") boolean value);
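// optional .tensorflow.DataType type = 6;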
public native void clear_type();
@MemberGetter public static native int kTypeFieldNumber();
public static final int kTypeFieldNumber = kTypeFieldNumber();
public native @Cast("tensorflow::DataType") int type();
public native void set_type(@Cast("tensorflow::DataType") int value);
// optional .tensorflow.TensorShapeProto shape = 7;
public native @Cast("bool") boolean has_shape();
public native void clear_shape();
@MemberGetter public static native int kShapeFieldNumber();
public static final int kShapeFieldNumber = kShapeFieldNumber();
public native @Const @ByRef TensorShapeProto shape();
public native TensorShapeProto mutable_shape();
public native TensorShapeProto release_shape();
public native void set_allocated_shape(TensorShapeProto shape);
// optional .tensorflow.TensorProto tensor = 8;
public native @Cast("bool") boolean has_tensor();
public native void clear_tensor();
@MemberGetter public static native int kTensorFieldNumber();
public static final int kTensorFieldNumber = kTensorFieldNumber();
public native @Const @ByRef TensorProto tensor();
public native TensorProto mutable_tensor();
public native TensorProto release_tensor();
public native void set_allocated_tensor(TensorProto tensor);
// optional .tensorflow.AttrValue.ListValue list = 1;
public native @Cast("bool") boolean has_list();
public native void clear_list();
@MemberGetter public static native int kListFieldNumber();
public static final int kListFieldNumber = kListFieldNumber();
public native @Const @ByRef AttrValue_ListValue list();
public native AttrValue_ListValue mutable_list();
public native AttrValue_ListValue release_list();
public native void set_allocated_list(AttrValue_ListValue list);
// optional .tensorflow.NameAttrList func = 10;
public native @Cast("bool") boolean has_func();
public native void clear_func();
@MemberGetter public static native int kFuncFieldNumber();
public static final int kFuncFieldNumber = kFuncFieldNumber();
public native @Const @ByRef NameAttrList func();
public native NameAttrList mutable_func();
public native NameAttrList release_func();
public native void set_allocated_func(NameAttrList func);
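// optional string placeholder = 9;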
public native void clear_placeholder();
@MemberGetter public static native int kPlaceholderFieldNumber();
public static final int kPlaceholderFieldNumber = kPlaceholderFieldNumber();
public native @StdString BytePointer placeholder();
public native void set_placeholder(@StdString BytePointer value);
public native void set_placeholder(@StdString String value);
public native void set_placeholder(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_placeholder(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_placeholder();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_placeholder();
public native void set_allocated_placeholder(@StdString @Cast({"char*", "std::string*"}) BytePointer placeholder);
public native @Cast("tensorflow::AttrValue::ValueCase") int value_case();
}
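// Example (editor's usage sketch, not part of the generated bindings): AttrValue is a protobuf
// oneof, so value_case() tracks whichever setter was called last and the constants kS..kFunc
// above are the case tags. The helper name exampleOneofAttr is hypothetical.
public static AttrValue exampleOneofAttr() {
    AttrValue attr = new AttrValue();       // value_case() == AttrValue.VALUE_NOT_SET here
    attr.set_i(42);                         // selects the kI case
    if (attr.value_case() == AttrValue.kI) {
        attr.set_s("forty-two");            // calling another setter switches the case to kS
    }
    return attr;
}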
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class NameAttrList extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NameAttrList(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public NameAttrList(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public NameAttrList position(long position) {
return (NameAttrList)super.position(position);
}
public NameAttrList() { super((Pointer)null); allocate(); }
private native void allocate();
public NameAttrList(@Const @ByRef NameAttrList from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef NameAttrList from);
public native @ByRef @Name("operator =") NameAttrList put(@Const @ByRef NameAttrList from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef NameAttrList default_instance();
public native void Swap(NameAttrList other);
// implements Message ----------------------------------------------
public native NameAttrList New();
public native NameAttrList New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef NameAttrList from);
public native void MergeFrom(@Const @ByRef NameAttrList from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
public native void clear_name();
@MemberGetter public static native int kNameFieldNumber();
public static final int kNameFieldNumber = kNameFieldNumber();
public native @StdString BytePointer name();
public native void set_name(@StdString BytePointer value);
public native void set_name(@StdString String value);
public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
// map<string, .tensorflow.AttrValue> attr = 2;
public native int attr_size();
public native void clear_attr();
@MemberGetter public static native int kAttrFieldNumber();
public static final int kAttrFieldNumber = kAttrFieldNumber();
public native @Const @ByRef StringAttrValueMap attr();
public native StringAttrValueMap mutable_attr();
}
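// Example (editor's usage sketch, not generated code): populating the attr map through
// mutable_attr(); map keys go through BytePointer in the StringAttrValueMap binding. The helper
// name and the "T"/DT_FLOAT choice below are illustrative assumptions.
public static NameAttrList exampleNameAttrList() {
    NameAttrList func = new NameAttrList();
    func.set_name("MyFunc");                            // optional string name = 1
    AttrValue dtype = new AttrValue();
    dtype.set_type(1);                                  // 1 == DT_FLOAT in tensorflow.DataType
    func.mutable_attr().put(new BytePointer("T"), dtype);
    return func;                                        // attr_size() now reports 1
}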
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// AttrValue_ListValue
// repeated bytes s = 2;
// repeated int64 i = 3 [packed = true];
// repeated float f = 4 [packed = true];
// repeated bool b = 5 [packed = true];
// repeated .tensorflow.DataType type = 6 [packed = true];
// repeated .tensorflow.TensorShapeProto shape = 7;
// repeated .tensorflow.TensorProto tensor = 8;
// -------------------------------------------------------------------
// AttrValue
// optional bytes s = 2;
// optional int64 i = 3;
// optional float f = 4;
// optional bool b = 5;
// optional .tensorflow.DataType type = 6;
// optional .tensorflow.TensorShapeProto shape = 7;
// optional .tensorflow.TensorProto tensor = 8;
// optional .tensorflow.AttrValue.ListValue list = 1;
// optional .tensorflow.NameAttrList func = 10;
// optional string placeholder = 9;
// -------------------------------------------------------------------
// NameAttrList
// optional string name = 1;
// map<string, .tensorflow.AttrValue> attr = 2;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2fattr_5fvalue_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/op_def.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/op_def.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/attr_value.pb.h"
// #include "tensorflow/core/framework/types.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class OpDef_ArgDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpDef_ArgDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public OpDef_ArgDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public OpDef_ArgDef position(long position) {
return (OpDef_ArgDef)super.position(position);
}
public OpDef_ArgDef() { super((Pointer)null); allocate(); }
private native void allocate();
public OpDef_ArgDef(@Const @ByRef OpDef_ArgDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef OpDef_ArgDef from);
public native @ByRef @Name("operator =") OpDef_ArgDef put(@Const @ByRef OpDef_ArgDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef OpDef_ArgDef default_instance();
public native void Swap(OpDef_ArgDef other);
// implements Message ----------------------------------------------
public native OpDef_ArgDef New();
public native OpDef_ArgDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef OpDef_ArgDef from);
public native void MergeFrom(@Const @ByRef OpDef_ArgDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
public native void clear_name();
@MemberGetter public static native int kNameFieldNumber();
public static final int kNameFieldNumber = kNameFieldNumber();
public native @StdString BytePointer name();
public native void set_name(@StdString BytePointer value);
public native void set_name(@StdString String value);
public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
// optional string description = 2;
public native void clear_description();
@MemberGetter public static native int kDescriptionFieldNumber();
public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
public native @StdString BytePointer description();
public native void set_description(@StdString BytePointer value);
public native void set_description(@StdString String value);
public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_description(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
// optional .tensorflow.DataType type = 3;
public native void clear_type();
@MemberGetter public static native int kTypeFieldNumber();
public static final int kTypeFieldNumber = kTypeFieldNumber();
public native @Cast("tensorflow::DataType") int type();
public native void set_type(@Cast("tensorflow::DataType") int value);
// optional string type_attr = 4;
public native void clear_type_attr();
@MemberGetter public static native int kTypeAttrFieldNumber();
public static final int kTypeAttrFieldNumber = kTypeAttrFieldNumber();
public native @StdString BytePointer type_attr();
public native void set_type_attr(@StdString BytePointer value);
public native void set_type_attr(@StdString String value);
public native void set_type_attr(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_type_attr(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type_attr();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type_attr();
public native void set_allocated_type_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer type_attr);
// optional string number_attr = 5;
public native void clear_number_attr();
@MemberGetter public static native int kNumberAttrFieldNumber();
public static final int kNumberAttrFieldNumber = kNumberAttrFieldNumber();
public native @StdString BytePointer number_attr();
public native void set_number_attr(@StdString BytePointer value);
public native void set_number_attr(@StdString String value);
public native void set_number_attr(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_number_attr(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_number_attr();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_number_attr();
public native void set_allocated_number_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer number_attr);
// optional string type_list_attr = 6;
public native void clear_type_list_attr();
@MemberGetter public static native int kTypeListAttrFieldNumber();
public static final int kTypeListAttrFieldNumber = kTypeListAttrFieldNumber();
public native @StdString BytePointer type_list_attr();
public native void set_type_list_attr(@StdString BytePointer value);
public native void set_type_list_attr(@StdString String value);
public native void set_type_list_attr(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_type_list_attr(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type_list_attr();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type_list_attr();
public native void set_allocated_type_list_attr(@StdString @Cast({"char*", "std::string*"}) BytePointer type_list_attr);
// optional bool is_ref = 16;
public native void clear_is_ref();
@MemberGetter public static native int kIsRefFieldNumber();
public static final int kIsRefFieldNumber = kIsRefFieldNumber();
public native @Cast("bool") boolean is_ref();
public native void set_is_ref(@Cast("bool") boolean value);
}
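// Example (editor's usage sketch, not generated code): an ArgDef either fixes a concrete
// DataType with set_type(), as here, or defers the type to an op attr with set_type_attr().
// The helper name is hypothetical.
public static OpDef_ArgDef exampleArgDef(String name) {
    OpDef_ArgDef arg = new OpDef_ArgDef();
    arg.set_name(name);     // optional string name = 1
    arg.set_type(1);        // optional .tensorflow.DataType type = 3 (1 == DT_FLOAT)
    return arg;
}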
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class OpDef_AttrDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpDef_AttrDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public OpDef_AttrDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public OpDef_AttrDef position(long position) {
return (OpDef_AttrDef)super.position(position);
}
public OpDef_AttrDef() { super((Pointer)null); allocate(); }
private native void allocate();
public OpDef_AttrDef(@Const @ByRef OpDef_AttrDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef OpDef_AttrDef from);
public native @ByRef @Name("operator =") OpDef_AttrDef put(@Const @ByRef OpDef_AttrDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef OpDef_AttrDef default_instance();
public native void Swap(OpDef_AttrDef other);
// implements Message ----------------------------------------------
public native OpDef_AttrDef New();
public native OpDef_AttrDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef OpDef_AttrDef from);
public native void MergeFrom(@Const @ByRef OpDef_AttrDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
public native void clear_name();
@MemberGetter public static native int kNameFieldNumber();
public static final int kNameFieldNumber = kNameFieldNumber();
public native @StdString BytePointer name();
public native void set_name(@StdString BytePointer value);
public native void set_name(@StdString String value);
public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
// optional string type = 2;
public native void clear_type();
@MemberGetter public static native int kTypeFieldNumber();
public static final int kTypeFieldNumber = kTypeFieldNumber();
public native @StdString BytePointer type();
public native void set_type(@StdString BytePointer value);
public native void set_type(@StdString String value);
public native void set_type(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_type(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_type();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_type();
public native void set_allocated_type(@StdString @Cast({"char*", "std::string*"}) BytePointer type);
// optional .tensorflow.AttrValue default_value = 3;
public native @Cast("bool") boolean has_default_value();
public native void clear_default_value();
@MemberGetter public static native int kDefaultValueFieldNumber();
public static final int kDefaultValueFieldNumber = kDefaultValueFieldNumber();
public native @Const @ByRef AttrValue default_value();
public native AttrValue mutable_default_value();
public native AttrValue release_default_value();
public native void set_allocated_default_value(AttrValue default_value);
// optional string description = 4;
public native void clear_description();
@MemberGetter public static native int kDescriptionFieldNumber();
public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
public native @StdString BytePointer description();
public native void set_description(@StdString BytePointer value);
public native void set_description(@StdString String value);
public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_description(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
// optional bool has_minimum = 5;
public native void clear_has_minimum();
@MemberGetter public static native int kHasMinimumFieldNumber();
public static final int kHasMinimumFieldNumber = kHasMinimumFieldNumber();
public native @Cast("bool") boolean has_minimum();
public native void set_has_minimum(@Cast("bool") boolean value);
// optional int64 minimum = 6;
public native void clear_minimum();
@MemberGetter public static native int kMinimumFieldNumber();
public static final int kMinimumFieldNumber = kMinimumFieldNumber();
public native @Cast("google::protobuf::int64") long minimum();
public native void set_minimum(@Cast("google::protobuf::int64") long value);
// optional .tensorflow.AttrValue allowed_values = 7;
public native @Cast("bool") boolean has_allowed_values();
public native void clear_allowed_values();
@MemberGetter public static native int kAllowedValuesFieldNumber();
public static final int kAllowedValuesFieldNumber = kAllowedValuesFieldNumber();
public native @Const @ByRef AttrValue allowed_values();
public native AttrValue mutable_allowed_values();
public native AttrValue release_allowed_values();
public native void set_allocated_allowed_values(AttrValue allowed_values);
}
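// Example (editor's usage sketch, not generated code): an AttrDef couples a typed attr with
// optional constraints; mutable_default_value() hands back the nested AttrValue message
// declared earlier in this file. The helper name is hypothetical.
public static OpDef_AttrDef exampleAttrDef() {
    OpDef_AttrDef attr = new OpDef_AttrDef();
    attr.set_name("N");                         // optional string name = 1
    attr.set_type("int");                       // optional string type = 2
    attr.set_has_minimum(true);                 // optional bool has_minimum = 5
    attr.set_minimum(1);                        // optional int64 minimum = 6
    attr.mutable_default_value().set_i(1);      // optional .tensorflow.AttrValue default_value = 3
    return attr;
}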
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class OpDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public OpDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public OpDef position(long position) {
return (OpDef)super.position(position);
}
public OpDef() { super((Pointer)null); allocate(); }
private native void allocate();
public OpDef(@Const @ByRef OpDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef OpDef from);
public native @ByRef @Name("operator =") OpDef put(@Const @ByRef OpDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef OpDef default_instance();
public native void Swap(OpDef other);
// implements Message ----------------------------------------------
public native OpDef New();
public native OpDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef OpDef from);
public native void MergeFrom(@Const @ByRef OpDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
public native void clear_name();
@MemberGetter public static native int kNameFieldNumber();
public static final int kNameFieldNumber = kNameFieldNumber();
public native @StdString BytePointer name();
public native void set_name(@StdString BytePointer value);
public native void set_name(@StdString String value);
public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
// repeated .tensorflow.OpDef.ArgDef input_arg = 2;
public native int input_arg_size();
public native void clear_input_arg();
@MemberGetter public static native int kInputArgFieldNumber();
public static final int kInputArgFieldNumber = kInputArgFieldNumber();
public native @Const @ByRef OpDef_ArgDef input_arg(int index);
public native OpDef_ArgDef mutable_input_arg(int index);
public native OpDef_ArgDef add_input_arg();
// repeated .tensorflow.OpDef.ArgDef output_arg = 3;
public native int output_arg_size();
public native void clear_output_arg();
@MemberGetter public static native int kOutputArgFieldNumber();
public static final int kOutputArgFieldNumber = kOutputArgFieldNumber();
public native @Const @ByRef OpDef_ArgDef output_arg(int index);
public native OpDef_ArgDef mutable_output_arg(int index);
public native OpDef_ArgDef add_output_arg();
// repeated .tensorflow.OpDef.AttrDef attr = 4;
public native int attr_size();
public native void clear_attr();
@MemberGetter public static native int kAttrFieldNumber();
public static final int kAttrFieldNumber = kAttrFieldNumber();
public native @Const @ByRef OpDef_AttrDef attr(int index);
public native OpDef_AttrDef mutable_attr(int index);
public native OpDef_AttrDef add_attr();
// optional string summary = 5;
public native void clear_summary();
@MemberGetter public static native int kSummaryFieldNumber();
public static final int kSummaryFieldNumber = kSummaryFieldNumber();
public native @StdString BytePointer summary();
public native void set_summary(@StdString BytePointer value);
public native void set_summary(@StdString String value);
public native void set_summary(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_summary(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_summary();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_summary();
public native void set_allocated_summary(@StdString @Cast({"char*", "std::string*"}) BytePointer summary);
// optional string description = 6;
public native void clear_description();
@MemberGetter public static native int kDescriptionFieldNumber();
public static final int kDescriptionFieldNumber = kDescriptionFieldNumber();
public native @StdString BytePointer description();
public native void set_description(@StdString BytePointer value);
public native void set_description(@StdString String value);
public native void set_description(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_description(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_description();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_description();
public native void set_allocated_description(@StdString @Cast({"char*", "std::string*"}) BytePointer description);
// optional bool is_commutative = 18;
public native void clear_is_commutative();
@MemberGetter public static native int kIsCommutativeFieldNumber();
public static final int kIsCommutativeFieldNumber = kIsCommutativeFieldNumber();
public native @Cast("bool") boolean is_commutative();
public native void set_is_commutative(@Cast("bool") boolean value);
// optional bool is_aggregate = 16;
public native void clear_is_aggregate();
@MemberGetter public static native int kIsAggregateFieldNumber();
public static final int kIsAggregateFieldNumber = kIsAggregateFieldNumber();
public native @Cast("bool") boolean is_aggregate();
public native void set_is_aggregate(@Cast("bool") boolean value);
// optional bool is_stateful = 17;
public native void clear_is_stateful();
@MemberGetter public static native int kIsStatefulFieldNumber();
public static final int kIsStatefulFieldNumber = kIsStatefulFieldNumber();
public native @Cast("bool") boolean is_stateful();
public native void set_is_stateful(@Cast("bool") boolean value);
// optional bool allows_uninitialized_input = 19;
public native void clear_allows_uninitialized_input();
@MemberGetter public static native int kAllowsUninitializedInputFieldNumber();
public static final int kAllowsUninitializedInputFieldNumber = kAllowsUninitializedInputFieldNumber();
public native @Cast("bool") boolean allows_uninitialized_input();
public native void set_allows_uninitialized_input(@Cast("bool") boolean value);
}
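// Example (editor's usage sketch, not generated code): add_input_arg(), add_output_arg() and
// add_attr() each append and return the new sub-message, so an op signature can be assembled
// in place. The op and attr names below are illustrative.
public static OpDef exampleOpDef() {
    OpDef op = new OpDef();
    op.set_name("MyRelu");                      // optional string name = 1
    op.set_summary("Illustrative unary op.");   // optional string summary = 5
    OpDef_ArgDef input = op.add_input_arg();    // repeated .tensorflow.OpDef.ArgDef input_arg = 2
    input.set_name("features");
    input.set_type_attr("T");                   // type is taken from the attr named "T"
    OpDef_ArgDef output = op.add_output_arg();  // repeated .tensorflow.OpDef.ArgDef output_arg = 3
    output.set_name("activations");
    output.set_type_attr("T");
    OpDef_AttrDef t = op.add_attr();            // repeated .tensorflow.OpDef.AttrDef attr = 4
    t.set_name("T");
    t.set_type("type");
    return op;
}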
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class OpList extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpList(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public OpList(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public OpList position(long position) {
return (OpList)super.position(position);
}
public OpList() { super((Pointer)null); allocate(); }
private native void allocate();
public OpList(@Const @ByRef OpList from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef OpList from);
public native @ByRef @Name("operator =") OpList put(@Const @ByRef OpList from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef OpList default_instance();
public native void Swap(OpList other);
// implements Message ----------------------------------------------
public native OpList New();
public native OpList New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef OpList from);
public native void MergeFrom(@Const @ByRef OpList from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.OpDef op = 1;
public native int op_size();
public native void clear_op();
@MemberGetter public static native int kOpFieldNumber();
public static final int kOpFieldNumber = kOpFieldNumber();
public native @Const @ByRef OpDef op(int index);
public native OpDef mutable_op(int index);
public native OpDef add_op();
}
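// Example (editor's usage sketch, not generated code): iteration over op(i), plus the usual
// protobuf pattern of calling ByteSize() before SerializeWithCachedSizesToArray(). The helper
// name is hypothetical; name() returns a BytePointer, read here with getString().
public static byte[] exampleSerializeOpList(OpList ops) {
    for (int i = 0; i < ops.op_size(); i++) {
        System.out.println(ops.op(i).name().getString());   // repeated .tensorflow.OpDef op = 1
    }
    byte[] buffer = new byte[ops.ByteSize()];   // ByteSize() also caches sub-message sizes
    ops.SerializeWithCachedSizesToArray(buffer);
    return buffer;
}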
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// OpDef_ArgDef
// optional string name = 1;
// optional string description = 2;
// optional .tensorflow.DataType type = 3;
// optional string type_attr = 4;
// optional string number_attr = 5;
// optional string type_list_attr = 6;
// optional bool is_ref = 16;
// -------------------------------------------------------------------
// OpDef_AttrDef
// optional string name = 1;
// optional string type = 2;
// optional .tensorflow.AttrValue default_value = 3;
// optional string description = 4;
// optional bool has_minimum = 5;
// optional int64 minimum = 6;
// optional .tensorflow.AttrValue allowed_values = 7;
// -------------------------------------------------------------------
// OpDef
// optional string name = 1;
// repeated .tensorflow.OpDef.ArgDef input_arg = 2;
// repeated .tensorflow.OpDef.ArgDef output_arg = 3;
// repeated .tensorflow.OpDef.AttrDef attr = 4;
// optional string summary = 5;
// optional string description = 6;
// optional bool is_commutative = 18;
// optional bool is_aggregate = 16;
// optional bool is_stateful = 17;
// optional bool allows_uninitialized_input = 19;
// -------------------------------------------------------------------
// OpList
// repeated .tensorflow.OpDef op = 1;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2fop_5fdef_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/function.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/function.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2ffunction_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2ffunction_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/attr_value.pb.h"
// #include "tensorflow/core/framework/op_def.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2ffunction_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2ffunction_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2ffunction_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class FunctionDefLibrary extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public FunctionDefLibrary(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public FunctionDefLibrary(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public FunctionDefLibrary position(long position) {
return (FunctionDefLibrary)super.position(position);
}
public FunctionDefLibrary() { super((Pointer)null); allocate(); }
private native void allocate();
public FunctionDefLibrary(@Const @ByRef FunctionDefLibrary from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef FunctionDefLibrary from);
public native @ByRef @Name("operator =") FunctionDefLibrary put(@Const @ByRef FunctionDefLibrary from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef FunctionDefLibrary default_instance();
public native void Swap(FunctionDefLibrary other);
// implements Message ----------------------------------------------
public native FunctionDefLibrary New();
public native FunctionDefLibrary New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef FunctionDefLibrary from);
public native void MergeFrom(@Const @ByRef FunctionDefLibrary from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.FunctionDef function = 1;
public native int function_size();
public native void clear_function();
@MemberGetter public static native int kFunctionFieldNumber();
public static final int kFunctionFieldNumber = kFunctionFieldNumber();
public native @Const @ByRef FunctionDef function(int index);
public native FunctionDef mutable_function(int index);
public native FunctionDef add_function();
// repeated .tensorflow.GradientDef gradient = 2;
public native int gradient_size();
public native void clear_gradient();
@MemberGetter public static native int kGradientFieldNumber();
public static final int kGradientFieldNumber = kGradientFieldNumber();
public native @Const @ByRef GradientDef gradient(int index);
public native GradientDef mutable_gradient(int index);
public native GradientDef add_gradient();
}
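// Example (editor's usage sketch, not generated code): add_function() appends an empty
// FunctionDef that can then be filled via CopyFrom(), and MergeFrom() folds another library's
// entries into this one. The helper name is hypothetical.
public static FunctionDefLibrary exampleLibrary(FunctionDef f, FunctionDefLibrary other) {
    FunctionDefLibrary lib = new FunctionDefLibrary();
    lib.add_function().CopyFrom(f);     // repeated .tensorflow.FunctionDef function = 1
    lib.MergeFrom(other);               // merges other's function and gradient entries
    return lib;
}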
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class FunctionDef_Node extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public FunctionDef_Node(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public FunctionDef_Node(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public FunctionDef_Node position(long position) {
return (FunctionDef_Node)super.position(position);
}
public FunctionDef_Node() { super((Pointer)null); allocate(); }
private native void allocate();
public FunctionDef_Node(@Const @ByRef FunctionDef_Node from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef FunctionDef_Node from);
public native @ByRef @Name("operator =") FunctionDef_Node put(@Const @ByRef FunctionDef_Node from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef FunctionDef_Node default_instance();
public native void Swap(FunctionDef_Node other);
// implements Message ----------------------------------------------
public native FunctionDef_Node New();
public native FunctionDef_Node New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef FunctionDef_Node from);
public native void MergeFrom(@Const @ByRef FunctionDef_Node from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated string ret = 1;
public native int ret_size();
public native void clear_ret();
@MemberGetter public static native int kRetFieldNumber();
public static final int kRetFieldNumber = kRetFieldNumber();
public native @StdString BytePointer ret(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_ret(int index);
public native void set_ret(int index, @StdString BytePointer value);
public native void set_ret(int index, @StdString String value);
public native void set_ret(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_ret(int index, String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_ret();
public native void add_ret(@StdString BytePointer value);
public native void add_ret(@StdString String value);
public native void add_ret(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void add_ret(String value, @Cast("size_t") long size);
// optional string op = 2;
public native void clear_op();
@MemberGetter public static native int kOpFieldNumber();
public static final int kOpFieldNumber = kOpFieldNumber();
public native @StdString BytePointer op();
public native void set_op(@StdString BytePointer value);
public native void set_op(@StdString String value);
public native void set_op(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_op(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_op();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_op();
public native void set_allocated_op(@StdString @Cast({"char*", "std::string*"}) BytePointer op);
// repeated string arg = 3;
public native int arg_size();
public native void clear_arg();
@MemberGetter public static native int kArgFieldNumber();
public static final int kArgFieldNumber = kArgFieldNumber();
public native @StdString BytePointer arg(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_arg(int index);
public native void set_arg(int index, @StdString BytePointer value);
public native void set_arg(int index, @StdString String value);
public native void set_arg(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_arg(int index, String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_arg();
public native void add_arg(@StdString BytePointer value);
public native void add_arg(@StdString String value);
public native void add_arg(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void add_arg(String value, @Cast("size_t") long size);
// repeated string dep = 4;
public native int dep_size();
public native void clear_dep();
@MemberGetter public static native int kDepFieldNumber();
public static final int kDepFieldNumber = kDepFieldNumber();
public native @StdString BytePointer dep(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_dep(int index);
public native void set_dep(int index, @StdString BytePointer value);
public native void set_dep(int index, @StdString String value);
public native void set_dep(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_dep(int index, String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_dep();
public native void add_dep(@StdString BytePointer value);
public native void add_dep(@StdString String value);
public native void add_dep(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void add_dep(String value, @Cast("size_t") long size);
// map<string, .tensorflow.AttrValue> attr = 5;
public native int attr_size();
public native void clear_attr();
@MemberGetter public static native int kAttrFieldNumber();
public static final int kAttrFieldNumber = kAttrFieldNumber();
public native @Const @ByRef StringAttrValueMap attr();
public native StringAttrValueMap mutable_attr();
}
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class FunctionDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public FunctionDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public FunctionDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public FunctionDef position(long position) {
return (FunctionDef)super.position(position);
}
public FunctionDef() { super((Pointer)null); allocate(); }
private native void allocate();
public FunctionDef(@Const @ByRef FunctionDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef FunctionDef from);
public native @ByRef @Name("operator =") FunctionDef put(@Const @ByRef FunctionDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef FunctionDef default_instance();
public native void Swap(FunctionDef other);
// implements Message ----------------------------------------------
public native FunctionDef New();
public native FunctionDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef FunctionDef from);
public native void MergeFrom(@Const @ByRef FunctionDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional .tensorflow.OpDef signature = 1;
public native @Cast("bool") boolean has_signature();
public native void clear_signature();
@MemberGetter public static native int kSignatureFieldNumber();
public static final int kSignatureFieldNumber = kSignatureFieldNumber();
public native @Const @ByRef OpDef signature();
public native OpDef mutable_signature();
public native OpDef release_signature();
public native void set_allocated_signature(OpDef signature);
// repeated .tensorflow.FunctionDef.Node node = 2;
public native int node_size();
public native void clear_node();
@MemberGetter public static native int kNodeFieldNumber();
public static final int kNodeFieldNumber = kNodeFieldNumber();
public native @Const @ByRef FunctionDef_Node node(int index);
public native FunctionDef_Node mutable_node(int index);
public native FunctionDef_Node add_node();
}
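// A minimal usage sketch (not part of the generated bindings): assembling a FunctionDef with the
// accessors declared above. The op, argument, and return names are made up; only the method
// signatures shown in this file are assumed.
public static FunctionDef exampleFunctionDef() {
    FunctionDef fdef = new FunctionDef();
    OpDef signature = fdef.mutable_signature();  // optional .tensorflow.OpDef signature = 1 (filled in via OpDef accessors declared elsewhere)
    FunctionDef_Node node = fdef.add_node();     // repeated .tensorflow.FunctionDef.Node node = 2
    node.set_op("Identity");                     // op invoked by this node
    node.add_arg("x");                           // input endpoint consumed by the op
    node.add_ret("y");                           // output endpoint produced by the op
    node.add_dep("init");                        // control dependency
    return fdef;
}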
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class GradientDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public GradientDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public GradientDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public GradientDef position(long position) {
return (GradientDef)super.position(position);
}
public GradientDef() { super((Pointer)null); allocate(); }
private native void allocate();
public GradientDef(@Const @ByRef GradientDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef GradientDef from);
public native @ByRef @Name("operator =") GradientDef put(@Const @ByRef GradientDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef GradientDef default_instance();
public native void Swap(GradientDef other);
// implements Message ----------------------------------------------
public native GradientDef New();
public native GradientDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef GradientDef from);
public native void MergeFrom(@Const @ByRef GradientDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string function_name = 1;
public native void clear_function_name();
@MemberGetter public static native int kFunctionNameFieldNumber();
public static final int kFunctionNameFieldNumber = kFunctionNameFieldNumber();
public native @StdString BytePointer function_name();
public native void set_function_name(@StdString BytePointer value);
public native void set_function_name(@StdString String value);
public native void set_function_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_function_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_function_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_function_name();
public native void set_allocated_function_name(@StdString @Cast({"char*", "std::string*"}) BytePointer function_name);
// optional string gradient_func = 2;
public native void clear_gradient_func();
@MemberGetter public static native int kGradientFuncFieldNumber();
public static final int kGradientFuncFieldNumber = kGradientFuncFieldNumber();
public native @StdString BytePointer gradient_func();
public native void set_gradient_func(@StdString BytePointer value);
public native void set_gradient_func(@StdString String value);
public native void set_gradient_func(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_gradient_func(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_gradient_func();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_gradient_func();
public native void set_allocated_gradient_func(@StdString @Cast({"char*", "std::string*"}) BytePointer gradient_func);
}
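// A minimal usage sketch (not part of the generated bindings): mapping a function to its gradient
// function via GradientDef. The function names are hypothetical.
public static GradientDef exampleGradientDef() {
    GradientDef grad = new GradientDef();
    grad.set_function_name("MyFunc");      // optional string function_name = 1
    grad.set_gradient_func("MyFuncGrad");  // optional string gradient_func = 2
    return grad;
}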
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// FunctionDefLibrary
// repeated .tensorflow.FunctionDef function = 1;
// repeated .tensorflow.GradientDef gradient = 2;
// -------------------------------------------------------------------
// FunctionDef_Node
// repeated string ret = 1;
// optional string op = 2;
// repeated string arg = 3;
// repeated string dep = 4;
// map<string, AttrValue> attr = 5;
// -------------------------------------------------------------------
// FunctionDef
// optional .tensorflow.OpDef signature = 1;
// repeated .tensorflow.FunctionDef.Node node = 2;
// -------------------------------------------------------------------
// GradientDef
// optional string function_name = 1;
// optional string gradient_func = 2;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2ffunction_2eproto__INCLUDED
// Parsed from tensorflow/core/framework/graph.pb.h
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/framework/graph.proto
// #ifndef PROTOBUF_tensorflow_2fcore_2fframework_2fgraph_2eproto__INCLUDED
// #define PROTOBUF_tensorflow_2fcore_2fframework_2fgraph_2eproto__INCLUDED
// #include
// #include
// #if GOOGLE_PROTOBUF_VERSION < 3000000
// #error This file was generated by a newer version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please update
// #error your headers.
// #endif
// #if 3000000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
// #error This file was generated by an older version of protoc which is
// #error incompatible with your Protocol Buffer headers. Please
// #error regenerate this file with a newer version of protoc.
// #endif
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include
// #include "tensorflow/core/framework/attr_value.pb.h"
// #include "tensorflow/core/framework/function.pb.h"
// #include "tensorflow/core/framework/versions.pb.h"
// @@protoc_insertion_point(includes)
// Internal implementation detail -- do not call these.
@Namespace("tensorflow") public static native void protobuf_AddDesc_tensorflow_2fcore_2fframework_2fgraph_2eproto();
@Namespace("tensorflow") public static native void protobuf_AssignDesc_tensorflow_2fcore_2fframework_2fgraph_2eproto();
@Namespace("tensorflow") public static native void protobuf_ShutdownFile_tensorflow_2fcore_2fframework_2fgraph_2eproto();
// ===================================================================
@Namespace("tensorflow") @NoOffset public static class GraphDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public GraphDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public GraphDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public GraphDef position(long position) {
return (GraphDef)super.position(position);
}
public GraphDef() { super((Pointer)null); allocate(); }
private native void allocate();
public GraphDef(@Const @ByRef GraphDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef GraphDef from);
public native @ByRef @Name("operator =") GraphDef put(@Const @ByRef GraphDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef GraphDef default_instance();
public native void Swap(GraphDef other);
// implements Message ----------------------------------------------
public native GraphDef New();
public native GraphDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef GraphDef from);
public native void MergeFrom(@Const @ByRef GraphDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// repeated .tensorflow.NodeDef node = 1;
public native int node_size();
public native void clear_node();
@MemberGetter public static native int kNodeFieldNumber();
public static final int kNodeFieldNumber = kNodeFieldNumber();
public native @Const @ByRef NodeDef node(int index);
public native NodeDef mutable_node(int index);
public native NodeDef add_node();
// optional .tensorflow.VersionDef versions = 4;
public native @Cast("bool") boolean has_versions();
public native void clear_versions();
@MemberGetter public static native int kVersionsFieldNumber();
public static final int kVersionsFieldNumber = kVersionsFieldNumber();
public native @Const @ByRef VersionDef versions();
public native VersionDef mutable_versions();
public native VersionDef release_versions();
public native void set_allocated_versions(VersionDef versions);
// optional int32 version = 3 [deprecated = true];
public native void clear_version();
@MemberGetter public static native int kVersionFieldNumber();
public static final int kVersionFieldNumber = kVersionFieldNumber();
public native @Cast("google::protobuf::int32") int version();
public native void set_version(@Cast("google::protobuf::int32") int value);
// optional .tensorflow.FunctionDefLibrary library = 2;
public native @Cast("bool") boolean has_library();
public native void clear_library();
@MemberGetter public static native int kLibraryFieldNumber();
public static final int kLibraryFieldNumber = kLibraryFieldNumber();
public native @Const @ByRef FunctionDefLibrary library();
public native FunctionDefLibrary mutable_library();
public native FunctionDefLibrary release_library();
public native void set_allocated_library(FunctionDefLibrary library);
}
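// A minimal usage sketch (not part of the generated bindings): building a tiny GraphDef and
// serializing it with the protobuf-style methods above. Calling ByteSize() before
// SerializeWithCachedSizesToArray() follows the usual protobuf contract; that ordering is an
// assumption about the generated message, not something stated in this file.
public static byte[] exampleSerializeGraphDef() {
    GraphDef graph = new GraphDef();
    NodeDef constNode = graph.add_node();            // repeated .tensorflow.NodeDef node = 1
    constNode.set_name("a");
    constNode.set_op("Const");
    byte[] buffer = new byte[graph.ByteSize()];      // computes and caches field sizes
    graph.SerializeWithCachedSizesToArray(buffer);   // writes the encoding using the cached sizes
    return buffer;
}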
// -------------------------------------------------------------------
@Namespace("tensorflow") @NoOffset public static class NodeDef extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NodeDef(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public NodeDef(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public NodeDef position(long position) {
return (NodeDef)super.position(position);
}
public NodeDef() { super((Pointer)null); allocate(); }
private native void allocate();
public NodeDef(@Const @ByRef NodeDef from) { super((Pointer)null); allocate(from); }
private native void allocate(@Const @ByRef NodeDef from);
public native @ByRef @Name("operator =") NodeDef put(@Const @ByRef NodeDef from);
public static native @Cast("const google::protobuf::Descriptor*") Pointer descriptor();
public static native @Const @ByRef NodeDef default_instance();
public native void Swap(NodeDef other);
// implements Message ----------------------------------------------
public native NodeDef New();
public native NodeDef New(@Cast("google::protobuf::Arena*") Pointer arena);
public native void CopyFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void MergeFrom(@Cast("const google::protobuf::Message*") @ByRef Pointer from);
public native void CopyFrom(@Const @ByRef NodeDef from);
public native void MergeFrom(@Const @ByRef NodeDef from);
public native void Clear();
public native @Cast("bool") boolean IsInitialized();
public native int ByteSize();
public native @Cast("bool") boolean MergePartialFromCodedStream(
@Cast("google::protobuf::io::CodedInputStream*") Pointer input);
public native void SerializeWithCachedSizes(
@Cast("google::protobuf::io::CodedOutputStream*") Pointer output);
public native @Cast("google::protobuf::uint8*") BytePointer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") BytePointer output);
public native @Cast("google::protobuf::uint8*") ByteBuffer SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") ByteBuffer output);
public native @Cast("google::protobuf::uint8*") byte[] SerializeWithCachedSizesToArray(@Cast("google::protobuf::uint8*") byte[] output);
public native int GetCachedSize();
public native @ByVal @Cast("google::protobuf::Metadata*") Pointer GetMetadata();
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
// optional string name = 1;
public native void clear_name();
@MemberGetter public static native int kNameFieldNumber();
public static final int kNameFieldNumber = kNameFieldNumber();
public native @StdString BytePointer name();
public native void set_name(@StdString BytePointer value);
public native void set_name(@StdString String value);
public native void set_name(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_name(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_name();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_name();
public native void set_allocated_name(@StdString @Cast({"char*", "std::string*"}) BytePointer name);
// optional string op = 2;
public native void clear_op();
@MemberGetter public static native int kOpFieldNumber();
public static final int kOpFieldNumber = kOpFieldNumber();
public native @StdString BytePointer op();
public native void set_op(@StdString BytePointer value);
public native void set_op(@StdString String value);
public native void set_op(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_op(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_op();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_op();
public native void set_allocated_op(@StdString @Cast({"char*", "std::string*"}) BytePointer op);
// repeated string input = 3;
public native int input_size();
public native void clear_input();
@MemberGetter public static native int kInputFieldNumber();
public static final int kInputFieldNumber = kInputFieldNumber();
public native @StdString BytePointer input(int index);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_input(int index);
public native void set_input(int index, @StdString BytePointer value);
public native void set_input(int index, @StdString String value);
public native void set_input(int index, @Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_input(int index, String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer add_input();
public native void add_input(@StdString BytePointer value);
public native void add_input(@StdString String value);
public native void add_input(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void add_input(String value, @Cast("size_t") long size);
// optional string device = 4;
public native void clear_device();
@MemberGetter public static native int kDeviceFieldNumber();
public static final int kDeviceFieldNumber = kDeviceFieldNumber();
public native @StdString BytePointer device();
public native void set_device(@StdString BytePointer value);
public native void set_device(@StdString String value);
public native void set_device(@Cast("const char*") BytePointer value, @Cast("size_t") long size);
public native void set_device(String value, @Cast("size_t") long size);
public native @StdString @Cast({"char*", "std::string*"}) BytePointer mutable_device();
public native @StdString @Cast({"char*", "std::string*"}) BytePointer release_device();
public native void set_allocated_device(@StdString @Cast({"char*", "std::string*"}) BytePointer device);
// map<string, AttrValue> attr = 5;
public native int attr_size();
public native void clear_attr();
@MemberGetter public static native int kAttrFieldNumber();
public static final int kAttrFieldNumber = kAttrFieldNumber();
public native @Const @ByRef StringAttrValueMap attr();
public native StringAttrValueMap mutable_attr();
}
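// A minimal usage sketch (not part of the generated bindings): wiring a NodeDef's inputs and
// device placement with the accessors above. The "a:0"/"b:0" endpoints and "/cpu:0" device
// string are illustrative values only.
public static NodeDef exampleAddNode(GraphDef graph) {
    NodeDef add = graph.add_node();
    add.set_name("sum");
    add.set_op("Add");
    add.add_input("a:0");       // "node:output_index" tensor endpoint
    add.add_input("b:0");
    add.set_device("/cpu:0");   // optional string device = 4
    return add;
}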
// ===================================================================
// ===================================================================
// #if !PROTOBUF_INLINE_NOT_IN_HEADERS
// GraphDef
// repeated .tensorflow.NodeDef node = 1;
// optional .tensorflow.VersionDef versions = 4;
// optional int32 version = 3 [deprecated = true];
// optional .tensorflow.FunctionDefLibrary library = 2;
// -------------------------------------------------------------------
// NodeDef
// optional string name = 1;
// optional string op = 2;
// repeated string input = 3;
// optional string device = 4;
// map<string, AttrValue> attr = 5;
// #endif // !PROTOBUF_INLINE_NOT_IN_HEADERS
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)
// namespace tensorflow
// @@protoc_insertion_point(global_scope)
// #endif // PROTOBUF_tensorflow_2fcore_2fframework_2fgraph_2eproto__INCLUDED
// Parsed from tensorflow/core/public/session.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_PUBLIC_SESSION_H_
// #define TENSORFLOW_PUBLIC_SESSION_H_
// #include
// #include
// #include "tensorflow/core/framework/graph.pb.h"
// #include "tensorflow/core/framework/tensor.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/platform/env.h"
// #include "tensorflow/core/protobuf/config.pb.h"
// #include "tensorflow/core/public/session_options.h"
/** \brief A Session instance lets a caller drive a TensorFlow graph
* computation.
*
* When a Session is created with a given target, a new Session object
* is bound to the universe of resources specified by that target.
* Those resources are available to this session to perform
* computation described in the GraphDef. After extending the session
* with a graph, the caller uses the Run() API to perform the
* computation and potentially fetch outputs as Tensors.
*
* Example:
*
* {@code c++
*
* tensorflow::GraphDef graph;
* // ... Create or load graph into "graph".
*
* // This example uses the default options which connects
* // to a local runtime.
* tensorflow::SessionOptions options;
 * std::unique_ptr<tensorflow::Session>
* session(tensorflow::NewSession(options));
*
* // Create the session with this graph.
* tensorflow::Status s = session->Create(graph);
* if (!s.ok()) { ... }
*
* // Run the graph and fetch the first output of the "output"
* // operation, and also run to but do not return anything
* // for the "update_state" operation.
 * std::vector<tensorflow::Tensor> outputs;
* s = session->Run({}, {"output:0"}, {"update_state"}, &outputs);
* if (!s.ok()) { ... }
*
* // Map the output as a flattened float tensor, and do something
* // with it.
 * auto output_tensor = outputs[0].flat<float>();
* if (output_tensor(0) > 0.5) { ... }
*
* // Close the session to release the resources associated with
* // this session.
* session->Close();
*
* }
*
* A Session allows concurrent calls to Run(), though a Session must
* be created / extended by a single thread.
*
* Only one thread must call Close(), and Close() must only be called
* after all other calls to Run() have returned. */
@Namespace("tensorflow") public static class Session extends AbstractSession {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Session(Pointer p) { super(p); }
///
/** Calls {@link tensorflow#NewSession(SessionOptions)} and registers a deallocator. */
public Session(SessionOptions options) { super(options); }
/** \brief Create the graph to be used for the session.
*
* Returns an error if this session has already been created with a
* graph. To re-use the session with a different graph, the caller
* must Close() the session first. */
///
public native @ByVal Status Create(@Const @ByRef GraphDef graph);
/** \brief Adds operations to the graph that is already registered with the
* Session.
*
* The names of new operations in "graph" must not exist in the
* graph that is already registered. */
///
///
///
///
public native @ByVal Status Extend(@Const @ByRef GraphDef graph);
/** \brief Runs the graph with the provided input tensors and fills
* {@code outputs} for the endpoints specified in {@code output_tensor_names}.
* Runs to but does not return Tensors for the nodes in
* {@code target_node_names}.
*
* The order of tensors in {@code outputs} will match the order provided
* by {@code output_tensor_names}.
*
* If {@code Run} returns {@code OK()}, then {@code outputs->size()} will be equal to
* {@code output_tensor_names.size()}. If {@code Run} does not return {@code OK()}, the
* state of {@code outputs} is undefined.
*
* REQUIRES: The name of each Tensor of the input or output must
* match a "Tensor endpoint" in the {@code GraphDef} passed to {@code Create()}.
*
* REQUIRES: outputs is not nullptr if {@code output_tensor_names} is non-empty. */
public native @ByVal Status Run(@Const @ByRef StringTensorPairVector inputs,
@Const @ByRef StringVector output_tensor_names,
@Const @ByRef StringVector target_node_names,
TensorVector outputs);
/** \brief Implementations which support {@code RunOptions}. */
//
/** NOTE: This API is still experimental and may change. */
public native @ByVal Status Create(@Const @ByRef RunOptions run_options, @Const @ByRef GraphDef graph);
public native @ByVal Status Extend(@Const @ByRef RunOptions run_options, @Const @ByRef GraphDef graph);
public native @ByVal Status Close(@Const @ByRef RunOptions run_options);
/** \brief Like {@code Run}, but allows users to pass in a {@code RunOptions} proto and
* to retrieve non-Tensor metadata output via a {@code RunMetadata} proto for this
* step. {@code run_metadata} may be nullptr, in which case any metadata output is
* discarded.
* NOTE: This API is still experimental and may change. */
public native @ByVal Status Run(@Const @ByRef RunOptions run_options,
@Const @ByRef StringTensorPairVector inputs,
@Const @ByRef StringVector output_tensor_names,
@Const @ByRef StringVector target_node_names,
TensorVector outputs, RunMetadata run_metadata);
/** \brief Sets up a graph for partial execution. All future feeds and
* fetches are specified by {@code input_names} and {@code output_names}. Returns
* {@code handle} that can be used to perform a sequence of partial feeds and
* fetches.
* NOTE: This API is still experimental and may change. */
public native @ByVal Status PRunSetup(@Const @ByRef StringVector input_names,
@Const @ByRef StringVector output_names,
@Const @ByRef StringVector target_nodes,
@StdString @Cast({"char*", "std::string*"}) BytePointer handle);
/** \brief Continues the pending execution specified by {@code handle} with the
* provided input tensors and fills {@code outputs} for the endpoints specified
* in {@code output_names}.
* NOTE: This API is still experimental and may change. */
///
public native @ByVal Status PRun(@StdString BytePointer handle,
@Const @ByRef StringTensorPairVector inputs,
@Const @ByRef StringVector output_names,
TensorVector outputs);
public native @ByVal Status PRun(@StdString String handle,
@Const @ByRef StringTensorPairVector inputs,
@Const @ByRef StringVector output_names,
TensorVector outputs);
/** \brief Closes this session.
*
* Closing a session releases the resources used by this session
* on the TensorFlow runtime (specified during session creation by
* the {@code SessionOptions::target} field). */
public native @ByVal Status Close();
}
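// A minimal usage sketch (not part of the generated bindings) mirroring the C++ example in the
// class comment above. SessionOptions, Status, StringTensorPairVector, StringVector and
// TensorVector are helper wrappers declared elsewhere in this file; "output:0" and
// "update_state" are placeholder node names.
public static void exampleRunSession(GraphDef graph) {
    Session session = new Session(new SessionOptions());
    Status s = session.Create(graph);
    if (!s.ok()) {                                       // ok()/error_message() are assumed from the Status wrapper
        System.err.println(s.error_message().getString());
        return;
    }
    TensorVector outputs = new TensorVector();
    s = session.Run(new StringTensorPairVector(),        // no feeds
                    new StringVector("output:0"),        // tensors to fetch
                    new StringVector("update_state"),    // nodes to run without fetching
                    outputs);
    if (!s.ok()) {
        System.err.println(s.error_message().getString());
    }
    session.Close();
}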
/** \brief Create a new session with the given options.
*
* If a new {@code Session} object could not be created, this function will
* return nullptr. */
///
@Namespace("tensorflow") public static native Session NewSession(@Const @ByRef SessionOptions options);
/** \brief Create a new session with the given options.
*
* If session creation succeeds, the new {@code Session} will be stored in
* {@code *out_session}, the caller will take ownership of the returned
* {@code *out_session}, and this function will return {@code OK()}. Otherwise, this
* function will return an error status. */
@Namespace("tensorflow") public static native @ByVal Status NewSession(@Const @ByRef SessionOptions options, @Cast("tensorflow::Session**") PointerPointer out_session);
@Namespace("tensorflow") public static native @ByVal Status NewSession(@Const @ByRef SessionOptions options, @ByPtrPtr Session out_session);
// end namespace tensorflow
// #endif // TENSORFLOW_PUBLIC_SESSION_H_
// Parsed from tensorflow/core/public/tensor_c_api.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// TODO(jeff,sanjay): Rename to tensorflow/public/c_api.h
// #ifndef TENSORFLOW_PUBLIC_TENSOR_C_API_H_
// #define TENSORFLOW_PUBLIC_TENSOR_C_API_H_
// #include
// --------------------------------------------------------------------------
// C API for TensorFlow.
//
// The API leans towards simplicity and uniformity instead of convenience
// since most usage will be by language specific wrappers.
//
// Conventions:
// * We use the prefix TF_ for everything in the API.
// * Objects are always passed around as pointers to opaque structs
// and these structs are allocated/deallocated via the API.
// * TF_Status holds error information. It is an object type
// and therefore is passed around as a pointer to an opaque
// struct as mentioned above.
// * Every call that has a TF_Status* argument clears it on success
// and fills it with error info on failure.
//
// Questions left to address:
// * Might need to add stride info to TF_Tensor?
// * Might at some point need a way for callers to provide their own Env.
// * Should we remove the TF_Status arg from TF_AddProto calls and only
// report errors later (e.g., on Run call).
// * Should dimensions be unsigned instead of signed?
// * Maybe add TF_TensorShape that encapsulates dimension info.
//
// Design decisions made:
// * Backing store for tensor memory has an associated deallocation
// function. This deallocation function will point to client code
// for tensors populated by the client. So the client can do things
// like shadowing a numpy array.
// * We do not provide TF_OK since it is not strictly necessary and we
// are not optimizing for convenience.
// * We make assumption that one session has one graph. This should be
// fine since we have the ability to run sub-graphs.
// * We are not providing TF_AddNode/TF_AddNodes to better support
// languages/platforms where proto is not available. This is because
// we can just point authors of bindings at the .proto file and the
// proto serialization spec and they can do the right thing for
// their language.
// * We could allow NULL for some arguments (e.g., NULL options arg).
// However since convenience is not a primary goal, we don't do this.
// * Devices are not in this API. Instead, they are created/used internally
// and the API just provides high level controls over the number of
// devices of each type.
// #ifdef __cplusplus
// #endif
// --------------------------------------------------------------------------
// TF_DataType holds the type for a scalar value. E.g., one slot in a tensor.
// The enum values here are identical to corresponding values in types.proto.
/** enum TF_DataType */
public static final int
TF_FLOAT = 1,
TF_DOUBLE = 2,
TF_INT32 = 3, // Int32 tensors are always in 'host' memory.
TF_UINT8 = 4,
TF_INT16 = 5,
TF_INT8 = 6,
TF_STRING = 7,
TF_COMPLEX64 = 8, // Single-precision complex
TF_COMPLEX = 8, // Old identifier kept for API backwards compatibility
TF_INT64 = 9,
TF_BOOL = 10,
TF_QINT8 = 11, // Quantized int8
TF_QUINT8 = 12, // Quantized uint8
TF_QINT32 = 13, // Quantized int32
TF_BFLOAT16 = 14, // Float32 truncated to 16 bits. Only for cast ops.
TF_QINT16 = 15, // Quantized int16
TF_QUINT16 = 16, // Quantized uint16
TF_UINT16 = 17,
TF_COMPLEX128 = 18, // Double-precision complex
TF_HALF = 19;
// --------------------------------------------------------------------------
// TF_Code holds an error code. The enum values here are identical to
// corresponding values in error_codes.proto.
/** enum TF_Code */
public static final int
TF_OK = 0,
TF_CANCELLED = 1,
TF_UNKNOWN = 2,
TF_INVALID_ARGUMENT = 3,
TF_DEADLINE_EXCEEDED = 4,
TF_NOT_FOUND = 5,
TF_ALREADY_EXISTS = 6,
TF_PERMISSION_DENIED = 7,
TF_UNAUTHENTICATED = 16,
TF_RESOURCE_EXHAUSTED = 8,
TF_FAILED_PRECONDITION = 9,
TF_ABORTED = 10,
TF_OUT_OF_RANGE = 11,
TF_UNIMPLEMENTED = 12,
TF_INTERNAL = 13,
TF_UNAVAILABLE = 14,
TF_DATA_LOSS = 15;
// --------------------------------------------------------------------------
// TF_Status holds error information. It either has an OK code, or
// else an error code with an associated error message.
@Opaque public static class TF_Status extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public TF_Status() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TF_Status(Pointer p) { super(p); }
}
// --------------------------------------------------------------------------
// TF_Buffer holds a pointer to a block of data and its associated length.
// Typically, the data consists of a serialized protocol buffer, but other data
// may also be held in a buffer.
//
// By default, TF_Buffer itself does not do any memory management of the
// pointed-to block. If need be, users of this struct should specify how to
// deallocate the block by setting the `data_deallocator` function pointer.
public static class TF_Buffer extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public TF_Buffer() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public TF_Buffer(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TF_Buffer(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public TF_Buffer position(long position) {
return (TF_Buffer)super.position(position);
}
@MemberGetter public native @Const Pointer data();
public native @Cast("size_t") long length(); public native TF_Buffer length(long length);
public static class Data_deallocator_Pointer_long extends FunctionPointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Data_deallocator_Pointer_long(Pointer p) { super(p); }
protected Data_deallocator_Pointer_long() { allocate(); }
private native void allocate();
public native void call(Pointer data, @Cast("size_t") long length);
}
public native Data_deallocator_Pointer_long data_deallocator(); public native TF_Buffer data_deallocator(Data_deallocator_Pointer_long data_deallocator);
}
// Makes a copy of the input and sets an appropriate deallocator. Useful for
// passing in read-only, input protobufs.
public static native TF_Buffer TF_NewBufferFromString(@Const Pointer proto, @Cast("size_t") long proto_len);
// Useful for passing *out* a protobuf.
public static native TF_Buffer TF_NewBuffer();
public static native void TF_DeleteBuffer(TF_Buffer arg0);
public static native @ByVal TF_Buffer TF_GetBuffer(TF_Buffer buffer);
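// A minimal usage sketch (not part of the generated bindings): copying a serialized proto into a
// TF_Buffer and releasing it again. The byte array is assumed to hold some serialized protocol
// buffer.
public static void exampleBufferLifecycle(byte[] serializedProto) {
    TF_Buffer buf = TF_NewBufferFromString(new BytePointer(serializedProto), serializedProto.length);
    System.out.println("buffer holds " + buf.length() + " bytes");
    TF_DeleteBuffer(buf);   // releases the copy made by TF_NewBufferFromString
}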
// --------------------------------------------------------------------------
// TF_Library holds information about dynamically loaded TensorFlow plugins.
@Opaque public static class TF_Library extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public TF_Library() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TF_Library(Pointer p) { super(p); }
}
// Return a new status object.
public static native TF_Status TF_NewStatus();
// Delete a previously created status object.
public static native void TF_DeleteStatus(TF_Status arg0);
// Record <code, msg> in *s. Any previous information is lost.
// A common use is to clear a status: TF_SetStatus(s, TF_OK, "");
public static native void TF_SetStatus(TF_Status s, @Cast("TF_Code") int code, @Cast("const char*") BytePointer msg);
public static native void TF_SetStatus(TF_Status s, @Cast("TF_Code") int code, String msg);
// Return the code recorded in *s.
public static native @Cast("TF_Code") int TF_GetCode(@Const TF_Status s);
// Return a pointer to the error message in *s. The return value
// points to memory that is only usable until the next mutation to *s.
// Always returns an empty string if TF_GetCode(s) is TF_OK.
public static native @Cast("const char*") BytePointer TF_Message(@Const TF_Status s);
// --------------------------------------------------------------------------
// TF_Tensor holds a multi-dimensional array of elements of a single data type.
// For all types other than TF_STRING, the data buffer stores elements
// in row major order. E.g. if data is treated as a vector of TF_DataType:
//
// element 0: index (0, ..., 0)
// element 1: index (0, ..., 1)
// ...
//
// TODO(jeff,sanjay): Define format for TF_STRING tensors. Perhaps:
// start_offset: array[uint64]
// data: byte[...]
//
// String length is encoded (varint?) starting at data[start_offset[i]]
// String contents follow immediately after string length.
@Opaque public static class TF_Tensor extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public TF_Tensor() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TF_Tensor(Pointer p) { super(p); }
}
// Return a new tensor that holds the bytes data[0,len-1].
//
// The data will be deallocated by a subsequent call to TF_DeleteTensor via:
// (*deallocator)(data, len, deallocator_arg)
// Clients must provide a custom deallocator function so they can pass in
// memory managed by something like numpy.
public static class Deallocator_Pointer_long_Pointer extends FunctionPointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Deallocator_Pointer_long_Pointer(Pointer p) { super(p); }
protected Deallocator_Pointer_long_Pointer() { allocate(); }
private native void allocate();
public native void call(Pointer data, @Cast("size_t") long len,
Pointer arg);
}
public static native TF_Tensor TF_NewTensor(@Cast("TF_DataType") int arg0, @Cast("long long*") LongPointer dims, int num_dims,
Pointer data, @Cast("size_t") long len,
Deallocator_Pointer_long_Pointer deallocator,
Pointer deallocator_arg);
public static native TF_Tensor TF_NewTensor(@Cast("TF_DataType") int arg0, @Cast("long long*") LongBuffer dims, int num_dims,
Pointer data, @Cast("size_t") long len,
Deallocator_Pointer_long_Pointer deallocator,
Pointer deallocator_arg);
public static native TF_Tensor TF_NewTensor(@Cast("TF_DataType") int arg0, @Cast("long long*") long[] dims, int num_dims,
Pointer data, @Cast("size_t") long len,
Deallocator_Pointer_long_Pointer deallocator,
Pointer deallocator_arg);
// Destroy a tensor.
public static native void TF_DeleteTensor(TF_Tensor arg0);
// Return the type of a tensor element.
public static native @Cast("TF_DataType") int TF_TensorType(@Const TF_Tensor arg0);
// Return the number of dimensions that the tensor has.
public static native int TF_NumDims(@Const TF_Tensor arg0);
// Return the length of the tensor in the "dim_index" dimension.
// REQUIRES: 0 <= dim_index < TF_NumDims(tensor)
public static native @Cast("long long") long TF_Dim(@Const TF_Tensor tensor, int dim_index);
// Return the size of the underlying data in bytes.
public static native @Cast("size_t") long TF_TensorByteSize(@Const TF_Tensor arg0);
// Return a pointer to the underlying data buffer.
public static native Pointer TF_TensorData(@Const TF_Tensor arg0);
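// A minimal usage sketch (not part of the generated bindings): creating a 2x3 float TF_Tensor over
// memory owned by JavaCPP, so the deallocator below deliberately does nothing. A client handing
// raw memory to TensorFlow would instead free it inside call().
public static TF_Tensor exampleNewFloatTensor() {
    long[] dims = {2, 3};
    FloatPointer data = new FloatPointer(2 * 3);      // row-major element storage
    Deallocator_Pointer_long_Pointer noOp = new Deallocator_Pointer_long_Pointer() {
        @Override public void call(Pointer p, long len, Pointer arg) { /* memory owned by JavaCPP */ }
    };
    return TF_NewTensor(TF_FLOAT, dims, dims.length,
                        data, 2 * 3 * 4 /* bytes: 6 floats */, noOp, null);
}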
// --------------------------------------------------------------------------
// TF_SessionOptions holds options that can be passed during session creation.
@Opaque public static class TF_SessionOptions extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public TF_SessionOptions() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TF_SessionOptions(Pointer p) { super(p); }
}
// Return a new options object.
public static native TF_SessionOptions TF_NewSessionOptions();
// Set the target in TF_SessionOptions.options.
// target can be empty, a single entry, or a comma separated list of entries.
// Each entry is in one of the following formats :
// "local"
// ip:port
// host:port
public static native void TF_SetTarget(TF_SessionOptions options, @Cast("const char*") BytePointer target);
public static native void TF_SetTarget(TF_SessionOptions options, String target);
// Set the config in TF_SessionOptions.options.
// config should be a serialized brain.ConfigProto proto.
// If config was not parsed successfully as a ConfigProto, record the
// error information in *status.
public static native void TF_SetConfig(TF_SessionOptions options, @Const Pointer proto,
@Cast("size_t") long proto_len, TF_Status status);
// Destroy an options object.
public static native void TF_DeleteSessionOptions(TF_SessionOptions arg0);
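// A minimal usage sketch (not part of the generated bindings): preparing TF_SessionOptions.
// "local" is one of the target formats listed above.
public static TF_SessionOptions exampleSessionOptions() {
    TF_SessionOptions options = TF_NewSessionOptions();
    TF_SetTarget(options, "local");   // connect to the in-process runtime
    return options;                   // pass to TF_NewSession, later TF_DeleteSessionOptions
}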
// TODO(jeff,sanjay):
// - export functions to set Config fields
// --------------------------------------------------------------------------
// TF_Session manages a single graph and execution.
@Opaque public static class TF_Session extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public TF_Session() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TF_Session(Pointer p) { super(p); }
}
// Return a new execution session, or NULL on error.
public static native TF_Session TF_NewSession(@Const TF_SessionOptions arg0, TF_Status status);
// Close a session.
public static native void TF_CloseSession(TF_Session arg0, TF_Status status);
// Destroy a session. Even if error information is recorded in *status,
// this call discards all resources associated with the session.
public static native void TF_DeleteSession(TF_Session arg0, TF_Status status);
// Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and
// add the nodes in that GraphDef to the graph for the session.
public static native void TF_ExtendGraph(TF_Session arg0, @Const Pointer proto, @Cast("size_t") long proto_len,
TF_Status arg3);
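// A minimal usage sketch (not part of the generated bindings): the create/extend/close/delete
// lifecycle of a C API session. "serializedGraphDef" is assumed to hold a serialized GraphDef
// (for instance, bytes produced as in the GraphDef sketch earlier in this file).
public static void exampleSessionLifecycle(byte[] serializedGraphDef) {
    TF_Status status = TF_NewStatus();
    TF_SessionOptions options = TF_NewSessionOptions();
    TF_Session session = TF_NewSession(options, status);   // NULL on error
    if (session != null && TF_GetCode(status) == TF_OK) {
        TF_ExtendGraph(session, new BytePointer(serializedGraphDef),
                       serializedGraphDef.length, status);
        TF_CloseSession(session, status);
        TF_DeleteSession(session, status);
    }
    TF_DeleteSessionOptions(options);
    TF_DeleteStatus(status);
}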
// Run the graph associated with the session starting with the
// supplied inputs (inputs[0,ninputs-1]). Regardless of success or
// failure, inputs[] become the property of the implementation (the
// implementation will eventually call TF_DeleteTensor on each input).
//
// Any NULL and non-NULL value combinations for (`run_options`,
// `run_metadata`) are valid.
//
// - `run_options` may be NULL, in which case it will be ignored; or
// non-NULL, in which case it must point to a `TF_Buffer` containing the
// serialized representation of a `RunOptions` protocol buffer.
// - `run_metadata` may be NULL, in which case it will be ignored; or
// non-NULL, in which case it must point to an empty, freshly allocated
// `TF_Buffer` that may be updated to contain the serialized representation
// of a `RunMetadata` protocol buffer.
//
// The caller retains the ownership of `run_options` and/or `run_metadata` (when
// not NULL) and should manually call TF_DeleteBuffer on them.
//
// On success, the tensors corresponding to output_names[0,noutputs-1]
// are placed in outputs[], and these outputs[] become the property
// of the caller (the caller must eventually call TF_DeleteTensor on
// them).
//
// On failure, outputs[] contains NULLs.
public static native void TF_Run(TF_Session arg0,
@Const TF_Buffer run_options,
@Cast("const char**") PointerPointer input_names, @Cast("TF_Tensor**") PointerPointer inputs, int ninputs,
@Cast("const char**") PointerPointer output_tensor_names, @Cast("TF_Tensor**") PointerPointer outputs,
int noutputs,
@Cast("const char**") PointerPointer target_node_names, int ntargets,
TF_Buffer run_metadata,
TF_Status arg11);
public static native void TF_Run(TF_Session arg0,
@Const TF_Buffer run_options,
@Cast("const char**") @ByPtrPtr BytePointer input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr BytePointer output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr BytePointer target_node_names, int ntargets,
TF_Buffer run_metadata,
TF_Status arg11);
public static native void TF_Run(TF_Session arg0,
@Const TF_Buffer run_options,
@Cast("const char**") @ByPtrPtr ByteBuffer input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr ByteBuffer output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr ByteBuffer target_node_names, int ntargets,
TF_Buffer run_metadata,
TF_Status arg11);
public static native void TF_Run(TF_Session arg0,
@Const TF_Buffer run_options,
@Cast("const char**") @ByPtrPtr byte[] input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr byte[] output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr byte[] target_node_names, int ntargets,
TF_Buffer run_metadata,
TF_Status arg11);
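// A minimal usage sketch (not part of the generated bindings): the simplest TF_Run call with no
// feeds, one fetch, no targets, and NULL run_options/run_metadata (allowed per the comment above).
// "output:0" is a placeholder name; the fetched tensor becomes the caller's property and must
// eventually be released with TF_DeleteTensor.
public static TF_Tensor exampleRun(TF_Session session, TF_Status status) {
    PointerPointer outputNames = new PointerPointer("output:0");
    PointerPointer fetched = new PointerPointer(1);
    TF_Run(session, null,
           null, null, 0,              // no inputs
           outputNames, fetched, 1,    // one fetched tensor
           null, 0,                    // no target nodes
           null, status);              // no run_metadata
    return TF_GetCode(status) == TF_OK ? new TF_Tensor(fetched.get(0)) : null;
}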
// Set up the graph with the intended feeds and fetches for a sequence
// of partial run calls.
//
// On success, returns a handle that is used for subsequent PRun calls.
//
// On failure, out_status contains a tensorflow::Status with an error
// message.
// NOTE: This is EXPERIMENTAL and subject to change.
public static native void TF_PRunSetup(TF_Session arg0,
@Cast("const char**") PointerPointer input_names, int ninputs,
@Cast("const char**") PointerPointer output_tensor_names, int noutputs,
@Cast("const char**") PointerPointer target_node_names, int ntargets,
@Cast("char**") PointerPointer handle,
TF_Status arg8);
public static native void TF_PRunSetup(TF_Session arg0,
@Cast("const char**") @ByPtrPtr BytePointer input_names, int ninputs,
@Cast("const char**") @ByPtrPtr BytePointer output_tensor_names, int noutputs,
@Cast("const char**") @ByPtrPtr BytePointer target_node_names, int ntargets,
@Cast("char**") @ByPtrPtr BytePointer handle,
TF_Status arg8);
public static native void TF_PRunSetup(TF_Session arg0,
@Cast("const char**") @ByPtrPtr ByteBuffer input_names, int ninputs,
@Cast("const char**") @ByPtrPtr ByteBuffer output_tensor_names, int noutputs,
@Cast("const char**") @ByPtrPtr ByteBuffer target_node_names, int ntargets,
@Cast("char**") @ByPtrPtr ByteBuffer handle,
TF_Status arg8);
public static native void TF_PRunSetup(TF_Session arg0,
@Cast("const char**") @ByPtrPtr byte[] input_names, int ninputs,
@Cast("const char**") @ByPtrPtr byte[] output_tensor_names, int noutputs,
@Cast("const char**") @ByPtrPtr byte[] target_node_names, int ntargets,
@Cast("char**") @ByPtrPtr byte[] handle,
TF_Status arg8);
// Continue to run the graph with additional feeds and fetches. The
// execution state is uniquely identified by the handle.
// NOTE: This is EXPERIMENTAL and subject to change.
public static native void TF_PRun(TF_Session arg0, @Cast("const char*") BytePointer handle,
@Cast("const char**") PointerPointer input_names, @Cast("TF_Tensor**") PointerPointer inputs, int ninputs,
@Cast("const char**") PointerPointer output_tensor_names, @Cast("TF_Tensor**") PointerPointer outputs,
int noutputs,
@Cast("const char**") PointerPointer target_node_names, int ntargets,
TF_Status arg10);
public static native void TF_PRun(TF_Session arg0, @Cast("const char*") BytePointer handle,
@Cast("const char**") @ByPtrPtr BytePointer input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr BytePointer output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr BytePointer target_node_names, int ntargets,
TF_Status arg10);
public static native void TF_PRun(TF_Session arg0, String handle,
@Cast("const char**") @ByPtrPtr ByteBuffer input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr ByteBuffer output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr ByteBuffer target_node_names, int ntargets,
TF_Status arg10);
public static native void TF_PRun(TF_Session arg0, @Cast("const char*") BytePointer handle,
@Cast("const char**") @ByPtrPtr byte[] input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr byte[] output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr byte[] target_node_names, int ntargets,
TF_Status arg10);
public static native void TF_PRun(TF_Session arg0, String handle,
@Cast("const char**") @ByPtrPtr BytePointer input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr BytePointer output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr BytePointer target_node_names, int ntargets,
TF_Status arg10);
public static native void TF_PRun(TF_Session arg0, @Cast("const char*") BytePointer handle,
@Cast("const char**") @ByPtrPtr ByteBuffer input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr ByteBuffer output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr ByteBuffer target_node_names, int ntargets,
TF_Status arg10);
public static native void TF_PRun(TF_Session arg0, String handle,
@Cast("const char**") @ByPtrPtr byte[] input_names, @ByPtrPtr TF_Tensor inputs, int ninputs,
@Cast("const char**") @ByPtrPtr byte[] output_tensor_names, @ByPtrPtr TF_Tensor outputs,
int noutputs,
@Cast("const char**") @ByPtrPtr byte[] target_node_names, int ntargets,
TF_Status arg10);
// --------------------------------------------------------------------------
// Load plugins containing custom ops and kernels
// Load the library specified by library_filename and register the ops and
// kernels present in that library.
//
// Pass "library_filename" to a platform-specific mechanism for dynamically
// loading a library. The rules for determining the exact location of the
// library are platform-specific and are not documented here.
// Expects the symbols "RegisterOps", "RegisterKernels", and "GetOpList" to be
// defined in the library.
//
// On success, place OK in status and return the newly created library handle.
// The caller owns the library handle.
//
// On failure, place an error status in status and return NULL.
public static native TF_Library TF_LoadLibrary(@Cast("const char*") BytePointer library_filename,
TF_Status status);
public static native TF_Library TF_LoadLibrary(String library_filename,
TF_Status status);
// Get the OpList of OpDefs defined in the library pointed by lib_handle.
//
// Returns a TF_Buffer. The memory pointed to by the result is owned by
// lib_handle. The data in the buffer will be the serialized OpList proto for
// ops defined in the library.
public static native @ByVal TF_Buffer TF_GetOpList(TF_Library lib_handle);
// #ifdef __cplusplus /* end extern "C" */
// #endif
// #endif // TENSORFLOW_PUBLIC_TENSOR_C_API_H_
// Parsed from tensorflow/core/framework/op_def_builder.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Class and associated machinery for specifying an Op's OpDef for Op
// registration.
// #ifndef TENSORFLOW_FRAMEWORK_OP_DEF_BUILDER_H_
// #define TENSORFLOW_FRAMEWORK_OP_DEF_BUILDER_H_
// #include
// #include
// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/core/stringpiece.h"
// Builder class passed to the REGISTER_OP() macro.
@Namespace("tensorflow") @NoOffset public static class OpDefBuilder extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpDefBuilder(Pointer p) { super(p); }
// Constructs an OpDef with just the name field set.
public OpDefBuilder(@StringPiece BytePointer op_name) { super((Pointer)null); allocate(op_name); }
private native void allocate(@StringPiece BytePointer op_name);
public OpDefBuilder(@StringPiece String op_name) { super((Pointer)null); allocate(op_name); }
private native void allocate(@StringPiece String op_name);
// Adds an attr to this OpDefBuilder (and returns *this). The spec has
// format "<name>:<type>" or "<name>:<type>=<default>"
// where <name> matches regexp [a-zA-Z][a-zA-Z0-9_]*
// (by convention only using capital letters for attrs that can be inferred)
// <type> can be:
// "string", "int", "float", "bool", "type", "shape", or "tensor"
// "numbertype", "realnumbertype", "quantizedtype", "{int32,int64}"
// (meaning "type" with a restriction on valid values)
// "{\"foo\", \"bar\n baz\"}", or "{'foo', 'bar\n baz'}"
// (meaning "string" with a restriction on valid values)
// "list(string)", ..., "list(tensor)", "list(numbertype)", ...
// (meaning lists of the above types)
// "int >= 2" (meaning "int" with a restriction on valid values)
// "list(string) >= 2", "list(int) >= 2"
// (meaning "list(string)" / "list(int)" with length at least 2)
// <default>, if included, should use the Proto text format
// of <type>. For lists use [a, b, c] format.
//
// Note that any attr specifying the length of an input or output will
// get a default minimum of 1 unless the >= # syntax is used.
//
// TODO(josh11b): Perhaps support restrictions and defaults as optional
// extra arguments to Attr() instead of encoding them in the spec string.
// TODO(josh11b): Would like to have better dtype handling for tensor attrs:
// * Ability to say the type of an input/output matches the type of
// the tensor.
// * Ability to restrict the type of the tensor like the existing
// restrictions for type attrs.
// Perhaps by linking the type of the tensor to a type attr?
public native @ByRef OpDefBuilder Attr(@StringPiece BytePointer spec);
public native @ByRef OpDefBuilder Attr(@StringPiece String spec);
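// For illustration only (these attr specs are made-up examples that follow the
// grammar above; they are not part of the generated bindings):
// .Attr("T: {float, int32}")
// .Attr("N: int >= 2")
// .Attr("padding: {'SAME', 'VALID'} = 'SAME'")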
// Adds an input or output to this OpDefBuilder (and returns *this).
// The spec has form "<name>:<type-expr>" or "<name>:Ref(<type-expr>)"
// where <name> matches regexp [a-z][a-z0-9_]* and <type-expr> can be:
// * For a single tensor: <type>
// * For a sequence of tensors with the same type: <number>*<type>
// * For a sequence of tensors with different types: <type-list>
// Where:
// <type> is either one of "float", "int32", "string", ...
// or the name of an attr (see above) with type "type".
// <number> is the name of an attr with type "int".
// <type-list> is the name of an attr with type "list(type)".
// TODO(josh11b): Indicate Ref() via an optional argument instead of
// in the spec?
// TODO(josh11b): SparseInput() and SparseOutput() matching the Python
// handling?
public native @ByRef OpDefBuilder Input(@StringPiece BytePointer spec);
public native @ByRef OpDefBuilder Input(@StringPiece String spec);
public native @ByRef OpDefBuilder Output(@StringPiece BytePointer spec);
public native @ByRef OpDefBuilder Output(@StringPiece String spec);
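// For illustration only (made-up specs that follow the grammar above):
// .Input("values: N * T") // a sequence of N tensors, all of attr type T
// .Input("indices: int32") // a single int32 tensor
// .Output("output: T")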
// Turns on the indicated boolean flag in this OpDefBuilder (and
// returns *this).
public native @ByRef OpDefBuilder SetIsCommutative();
public native @ByRef OpDefBuilder SetIsAggregate();
public native @ByRef OpDefBuilder SetIsStateful();
public native @ByRef OpDefBuilder SetAllowsUninitializedInput();
// Adds docs to this OpDefBuilder (and returns *this).
// Docs have the format:
// <1-line summary>
// <rest of the description (potentially many lines)>
// <name-of-attr-input-or-output>: <description of name>
// <name-of-attr-input-or-output>: <description of name;
//   if long, indent the description on subsequent lines>
//
// Where <name-of-...> is the name of an attr, input, or output. Please
// wrap docs at 72 columns so that it may be indented in the
// generated output. For tensor inputs or outputs (not attrs), you
// may start the description with an "=" (like name:= <description>)
// to suppress the automatically-generated type documentation in
// generated output.
// #ifndef TF_LEAN_BINARY
public native @ByRef OpDefBuilder Doc(@StringPiece BytePointer text);
public native @ByRef OpDefBuilder Doc(@StringPiece String text);
// #else
// #endif
// Sets *op_def to the requested OpDef, or returns an error.
// Must be called after all of the above methods.
// Note that OpDefBuilder only reports parsing errors. You should also
// call ValidateOpDef() to detect other problems.
public native @ByVal Status Finalize(OpDef op_def);
}
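// A minimal usage sketch of OpDefBuilder from Java. The op name and specs are
// invented for illustration, and Status.ok() and the OpDef default constructor
// are assumed to be exposed by these bindings:
//
// OpDef op_def = new OpDef();
// Status s = new OpDefBuilder("MyExampleOp")
//     .Attr("T: {float, int32}")
//     .Input("x: T")
//     .Output("y: T")
//     .Doc("MyExampleOp summary.")
//     .Finalize(op_def);
// if (!s.ok()) {
//     // Handle the spec-parsing error reported by the builder.
// }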
// namespace tensorflow
// #endif // TENSORFLOW_FRAMEWORK_OP_DEF_BUILDER_H_
// Parsed from tensorflow/core/framework/op_def_util.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// TODO(josh11b): Probably not needed for OpKernel authors, so doesn't
// need to be as publicly accessible as other files in framework/.
// #ifndef TENSORFLOW_FRAMEWORK_OP_DEF_UTIL_H_
// #define TENSORFLOW_FRAMEWORK_OP_DEF_UTIL_H_
// #include <string>
// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/lib/core/status.h"
// Performs a consistency check across the fields of the op_def.
@Namespace("tensorflow") public static native @ByVal Status ValidateOpDef(@Const @ByRef OpDef op_def);
// Validates that attr_value satisfies the type and constraints from attr.
// REQUIRES: attr has already been validated.
@Namespace("tensorflow") public static native @ByVal Status ValidateAttrValue(@Const @ByRef AttrValue attr_value,
@Cast("const tensorflow::OpDef::AttrDef*") @ByRef OpDef_AttrDef attr);
// The following functions search through op_def for an attr with the
// indicated name. They return nullptr if no such attr is found.
@Namespace("tensorflow") public static native @Cast("const tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttr(@StringPiece BytePointer name, @Const @ByRef OpDef op_def);
@Namespace("tensorflow") public static native @Cast("const tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttr(@StringPiece String name, @Const @ByRef OpDef op_def);
@Namespace("tensorflow") public static native @Cast("tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttrMutable(@StringPiece BytePointer name, OpDef op_def);
@Namespace("tensorflow") public static native @Cast("tensorflow::OpDef::AttrDef*") OpDef_AttrDef FindAttrMutable(@StringPiece String name, OpDef op_def);
// Produce a human-readable version of an op_def that is more concise
// than a text-format proto. Excludes descriptions.
@Namespace("tensorflow") public static native @StdString BytePointer SummarizeOpDef(@Const @ByRef OpDef op_def);
// Returns an error if new_op is not backwards-compatible with (more
// accepting than) old_op.
// REQUIRES: old_op and new_op must pass validation.
@Namespace("tensorflow") public static native @ByVal Status OpDefCompatible(@Const @ByRef OpDef old_op, @Const @ByRef OpDef new_op);
// Returns an error if any attr in penultimate_op that is not in old_op
// has a different default value in new_op. In general it is not safe
// to change the default for an attr that has been added to an op.
@Namespace("tensorflow") public static native @ByVal Status OpDefAddedDefaultsUnchanged(@Const @ByRef OpDef old_op,
@Const @ByRef OpDef penultimate_op,
@Const @ByRef OpDef new_op);
// Remove all docs from *op_def / *op_list.
@Namespace("tensorflow") public static native void RemoveDescriptionsFromOpDef(OpDef op_def);
@Namespace("tensorflow") public static native void RemoveDescriptionsFromOpList(OpList op_list);
// namespace tensorflow
// #endif // TENSORFLOW_FRAMEWORK_OP_DEF_UTIL_H_
// Parsed from tensorflow/core/framework/op.h
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// #ifndef TENSORFLOW_FRAMEWORK_OP_H_
// #define TENSORFLOW_FRAMEWORK_OP_H_
// #include <functional>
// #include <unordered_map>
// #include <vector>
// #include "tensorflow/core/framework/op_def.pb.h"
// #include "tensorflow/core/framework/op_def_builder.h"
// #include "tensorflow/core/framework/op_def_util.h"
// #include "tensorflow/core/framework/selective_registration.h"
// #include "tensorflow/core/lib/core/status.h"
// #include "tensorflow/core/lib/strings/str_util.h"
// #include "tensorflow/core/lib/strings/strcat.h"
// #include "tensorflow/core/platform/logging.h"
// #include "tensorflow/core/platform/macros.h"
// #include "tensorflow/core/platform/mutex.h"
// #include "tensorflow/core/platform/thread_annotations.h"
// #include "tensorflow/core/platform/types.h"
// Users that want to look up an OpDef by type name should take an
// OpRegistryInterface. Functions accepting a
// (const) OpRegistryInterface* may call LookUp() from multiple threads.
@Namespace("tensorflow") public static class OpRegistryInterface extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpRegistryInterface(Pointer p) { super(p); }
// Returns nullptr and sets *status if no OpDef is registered under that
// name, otherwise returns the registered OpDef.
// Caller must not delete the returned pointer.
public native @Const OpDef LookUp(@StdString BytePointer op_type_name,
Status status);
public native @Const OpDef LookUp(@StdString String op_type_name,
Status status);
}
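// A typical lookup pattern against any OpRegistryInterface (illustrative;
// "MatMul" is just an example op name, and a default-constructed Status is
// assumed to be available in these bindings):
//
// Status status = new Status();
// OpDef op_def = registry.LookUp("MatMul", status);
// if (op_def == null) {
//     // Not registered; inspect status for details. Never delete op_def.
// }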
// The standard implementation of OpRegistryInterface, along with a
// global singleton used for registering OpDefs via the REGISTER
// macros below. Thread-safe.
//
// Example registration:
// OpRegistry::Global()->Register([]()->OpDef{
// OpDef def;
// // Populate def here.
// return def;
// });
@Namespace("tensorflow") @NoOffset public static class OpRegistry extends OpRegistryInterface {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpRegistry(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public OpRegistry(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public OpRegistry position(long position) {
return (OpRegistry)super.position(position);
}
public OpRegistry() { super((Pointer)null); allocate(); }
private native void allocate();
// Calls func() and registers the returned OpDef. Since Register()
// is normally called during program initialization (before main()),
// we defer calling func() until the first call to LookUp() or
// Export() (if one of those has already been called, func() is
// called immediately).
public native void Register(@Const @ByRef OpDef op_def);
public native @Const OpDef LookUp(@StdString BytePointer op_type_name,
Status status);
public native @Const OpDef LookUp(@StdString String op_type_name,
Status status);
// Fills *ops with all registered OpDefs (except those with names
// starting with '_' if include_internal == false).
public native void Export(@Cast("bool") boolean include_internal, OpList ops);
// Returns ASCII-format OpList for all registered OpDefs (except
// those with names starting with '_' if include_internal == false).
public native @StdString BytePointer DebugString(@Cast("bool") boolean include_internal);
// A singleton available at startup.
public static native OpRegistry Global();
// Get all registered ops.
public native void GetRegisteredOps(@StdVector OpDef op_defs);
// Watcher, a function object.
// watcher_, if not null, is called every time an op is registered via the
// Register function. watcher_ is passed the OpDef of the op getting
// registered.
// An OpRegistry object has only one watcher. This interface is not
// thread-safe, as different clients are free to set the watcher at any time.
// Clients are expected to atomically perform the following sequence of
// operations:
// SetWatcher(a_watcher);
// Register some ops;
// SetWatcher(nullptr);
// Returns a non-OK status if a non-null watcher is overwritten by another
// non-null watcher.
public native @ByVal Status SetWatcher(@Cast("const tensorflow::OpRegistry::Watcher*") @ByRef Fn watcher);
}
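// A possible way to inspect the global registry from Java (illustrative only;
// OpList is assumed to have a default constructor in these bindings):
//
// OpList ops = new OpList();
// OpRegistry.Global().Export(false, ops); // false: skip ops starting with '_'
// System.out.println(OpRegistry.Global().DebugString(false).getString());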
// An adapter to allow an OpList to be used as an OpRegistryInterface.
@Namespace("tensorflow") @NoOffset public static class OpListOpRegistry extends OpRegistryInterface {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public OpListOpRegistry(Pointer p) { super(p); }
// Does not take ownership of op_list, *op_list must outlive *this.
public OpListOpRegistry(@Const OpList op_list) { super((Pointer)null); allocate(op_list); }
private native void allocate(@Const OpList op_list);
public native @Const OpDef LookUp(@StdString BytePointer op_type_name,
Status status);
public native @Const OpDef LookUp(@StdString String op_type_name,
Status status);
}
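// Sketch of adapting an existing OpList for lookups (illustrative; ops must be
// an OpList that outlives the registry, per the ownership note above):
//
// OpListOpRegistry op_list_registry = new OpListOpRegistry(ops);
// Status status = new Status();
// OpDef def = op_list_registry.LookUp("SomeOp", status);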
// Treats 'registry_ptr' as a pointer to OpRegistry, and calls
// registry_ptr->Register(op_def) for each op_def that has been registered with
// the current library's global op registry (obtained by calling
// OpRegistry::Global()).
@Namespace("tensorflow") public static native void RegisterOps(Pointer registry_ptr);
// Support for defining the OpDef (specifying the semantics of the Op and how
// it should be created) and registering it in the OpRegistry::Global()
// registry. Usage:
//
// REGISTER_OP("my_op_name")
// .Attr(":")
// .Attr(":=")
// .Input(":")
// .Input(":Ref()")
// .Output(":")
// .Doc(R"(
// <1-line summary>
// <rest of the description>
// <name-of-attr-or-input>: <description of name>
// <name-of-attr-or-input>: <description of name;
//   if long, indent the description on subsequent lines>
// )");
//
// Note: .Doc() should be last.
// For details, see the OpDefBuilder class in op_def_builder.h.
// OpDefBuilderWrapper is a templated class that is used in the REGISTER_OP
// calls. This allows the result of REGISTER_OP to be used in chaining, as in
// REGISTER_OP(a).Attr("...").Input("...");, while still allowing selective
// registration to turn the entire call-chain into a no-op.
// Template specialization that forwards all calls to the contained builder.
@Name("tensorflow::register_op::OpDefBuilderWrapper") @NoOffset public static class TrueOpDefBuilderWrapper extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TrueOpDefBuilderWrapper(Pointer p) { super(p); }
public TrueOpDefBuilderWrapper(@Cast("const char*") BytePointer name) { super((Pointer)null); allocate(name); }
private native void allocate(@Cast("const char*") BytePointer name);
public TrueOpDefBuilderWrapper(String name) { super((Pointer)null); allocate(name); }
private native void allocate(String name);
public native @ByRef TrueOpDefBuilderWrapper Attr(@StringPiece BytePointer spec);
public native @ByRef TrueOpDefBuilderWrapper Attr(@StringPiece String spec);
public native @ByRef TrueOpDefBuilderWrapper Input(@StringPiece BytePointer spec);
public native @ByRef TrueOpDefBuilderWrapper Input(@StringPiece String spec);
public native @ByRef TrueOpDefBuilderWrapper Output(@StringPiece BytePointer spec);
public native @ByRef TrueOpDefBuilderWrapper Output(@StringPiece String spec);
public native @ByRef TrueOpDefBuilderWrapper SetIsCommutative();
public native @ByRef TrueOpDefBuilderWrapper SetIsAggregate();
public native @ByRef TrueOpDefBuilderWrapper SetIsStateful();
public native @ByRef TrueOpDefBuilderWrapper SetAllowsUninitializedInput();
public native @ByRef TrueOpDefBuilderWrapper Doc(@StringPiece BytePointer text);
public native @ByRef TrueOpDefBuilderWrapper Doc(@StringPiece String text);
public native @Const @ByRef OpDefBuilder builder();
}
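// The wrapper simply forwards to the contained OpDefBuilder, so chaining from
// Java mirrors the REGISTER_OP usage shown above (names invented; note that
// constructing the wrapper does not by itself register the op):
//
// new TrueOpDefBuilderWrapper("MyExampleOp")
//     .Attr("T: {float, int32}")
//     .Input("x: T")
//     .Output("y: T")
//     .Doc("MyExampleOp summary.");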
// Template specialization that turns all calls into no-ops.
@Name("tensorflow::register_op::OpDefBuilderWrapper