op {
  name: "Abort"
  attr {
    name: "error_msg"
    type: "string"
    default_value {
      s: ""
    }
    description: "A string which is the message associated with the exception."
  }
  attr {
    name: "exit_without_error"
    type: "bool"
    default_value {
      b: false
    }
  }
  summary: "Raise a exception to abort the process when called."
  description: "If exit_without_error is true, the process will exit normally,\notherwise it will exit with a SIGABORT signal.\n\nReturns nothing but an exception."
}
op {
  name: "Abs"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Computes the absolute value of a tensor."
  description: "Given a tensor `x`, this operation returns a tensor containing the absolute\nvalue of each element in `x`. For example, if x is an input element and y is\nan output element, this operation computes \\\\(y = |x|\\\\)."
}
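The \\(y = |x|\\) behaviour above is easiest to see through the Python wrapper for this op; a minimal sketch, assuming an eager-mode TensorFlow build:

```
import tensorflow as tf

x = tf.constant([-2.5, 0.0, 3.0, -7.0])
y = tf.abs(x)  # element-wise |x|, using one of the allowed T types (float here)
print(y.numpy())  # [2.5 0.  3.  7. ]
```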
op {
  name: "AccumulateNV2"
  input_arg {
    name: "inputs"
    description: "A list of `Tensor` objects, each with same shape and type."
    type_attr: "T"
    number_attr: "N"
  }
  output_arg {
    name: "sum"
    type_attr: "T"
  }
  attr {
    name: "N"
    type: "int"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "shape"
    type: "shape"
    description: "Shape of elements of `inputs`."
  }
  summary: "Returns the element-wise sum of a list of tensors."
  description: "`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not\nwait for all of its inputs to be ready before beginning to sum. This can\nsave memory if inputs are ready at different times, since minimum temporary\nstorage is proportional to the output size rather than the inputs size.\n\nUnlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.\n\nReturns a `Tensor` of same shape and type as the elements of `inputs`."
  is_aggregate: true
  is_commutative: true
}
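A quick illustration of the `add_n`/`accumulate_n_v2` equivalence described above; a minimal sketch, assuming the `tf.math.accumulate_n` wrapper is available in your TensorFlow build:

```
import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 5.], [5., 5.]])

# Both return the element-wise sum; accumulate_n may use less temporary
# memory because it does not wait for all inputs before summing.
print(tf.add_n([a, b]).numpy())              # [[6. 7.] [8. 9.]]
print(tf.math.accumulate_n([a, b]).numpy())  # same result
```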
op {
  name: "AccumulatorApplyGradient"
  input_arg {
    name: "handle"
    description: "The handle to a accumulator."
    type: DT_STRING
    is_ref: true
  }
  input_arg {
    name: "local_step"
    description: "The local_step value at which the gradient was computed."
    type: DT_INT64
  }
  input_arg {
    name: "gradient"
    description: "A tensor of the gradient to be accumulated."
    type_attr: "dtype"
  }
  attr {
    name: "dtype"
    type: "type"
    description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Applies a gradient to a given accumulator."
  description: "Does not add if local_step is lesser than the accumulator\'s global_step."
}
op {
  name: "AccumulatorNumAccumulated"
  input_arg {
    name: "handle"
    description: "The handle to an accumulator."
    type: DT_STRING
    is_ref: true
  }
  output_arg {
    name: "num_accumulated"
    description: "The number of gradients aggregated in the given accumulator."
    type: DT_INT32
  }
  summary: "Returns the number of gradients aggregated in the given accumulators."
}
op {
  name: "AccumulatorSetGlobalStep"
  input_arg {
    name: "handle"
    description: "The handle to an accumulator."
    type: DT_STRING
    is_ref: true
  }
  input_arg {
    name: "new_global_step"
    description: "The new global_step value to set."
    type: DT_INT64
  }
  summary: "Updates the accumulator with a new value for global_step."
  description: "Logs warning if the accumulator\'s value is already higher than\nnew_global_step."
}
op {
  name: "AccumulatorTakeGradient"
  input_arg {
    name: "handle"
    description: "The handle to an accumulator."
    type: DT_STRING
    is_ref: true
  }
  input_arg {
    name: "num_required"
    description: "Number of gradients required before we return an aggregate."
    type: DT_INT32
  }
  output_arg {
    name: "average"
    description: "The average of the accumulated gradients."
    type_attr: "dtype"
  }
  attr {
    name: "dtype"
    type: "type"
    description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator."
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Extracts the average gradient in the given ConditionalAccumulator."
  description: "The op blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated.  If the accumulator has already\naggregated more than num_required gradients, it returns the average of\nthe accumulated gradients.  Also automatically increments the recorded\nglobal_step in the accumulator by 1, and resets the aggregate to 0."
}
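The four accumulator ops above (`AccumulatorApplyGradient`, `AccumulatorNumAccumulated`, `AccumulatorSetGlobalStep`, `AccumulatorTakeGradient`) form one small protocol: apply gradients tagged with a `local_step`, then take the average once `num_required` have arrived. A minimal graph-mode sketch using the `tf.compat.v1.ConditionalAccumulator` wrapper (an assumption; exact wrapper availability depends on your TensorFlow build):

```
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

acc = tf.compat.v1.ConditionalAccumulator(dtype=tf.float32, shape=(2,))
apply1 = acc.apply_grad(tf.constant([1.0, 2.0]), local_step=0)  # AccumulatorApplyGradient
apply2 = acc.apply_grad(tf.constant([3.0, 4.0]), local_step=0)
count = acc.num_accumulated()        # AccumulatorNumAccumulated
avg = acc.take_grad(num_required=2)  # AccumulatorTakeGradient; blocks until 2 arrive

with tf.compat.v1.Session() as sess:
    sess.run([apply1, apply2])
    print(sess.run(count))  # 2
    print(sess.run(avg))    # [2. 3.] -- also bumps global_step and resets the aggregate
```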
op {
  name: "Acos"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes acos of x element-wise."
}
op {
  name: "Acosh"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes inverse hyperbolic cosine of x element-wise."
}
op {
  name: "Add"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_UINT8
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_STRING
      }
    }
  }
  summary: "Returns x + y element-wise."
  description: "*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)"
}
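The broadcasting note above is the practical difference between `Add` and `AddN`; a minimal sketch:

```
import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])   # shape (2, 3)
y = tf.constant([10., 20., 30.])  # shape (3,), broadcast across rows

print(tf.add(x, y).numpy())
# [[11. 22. 33.]
#  [14. 25. 36.]]

# tf.add_n([x, y]) would fail here: AddN requires identical shapes.
```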
op {
  name: "AddManySparseToTensorsMap"
  input_arg {
    name: "sparse_indices"
    description: "2-D.  The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`."
    type: DT_INT64
  }
  input_arg {
    name: "sparse_values"
    description: "1-D.  The `values` of the minibatch `SparseTensor`."
    type_attr: "T"
  }
  input_arg {
    name: "sparse_shape"
    description: "1-D.  The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`."
    type: DT_INT64
  }
  output_arg {
    name: "sparse_handles"
    description: "1-D.  The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`.  Shape: `[N]`."
    type: DT_INT64
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "The container name for the `SparseTensorsMap` created by this op."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
  }
  summary: "Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles."
  description: "A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,\n`sparse_values`, and `sparse_shape`, where\n\n```sparse_indices.shape[1] == sparse_shape.shape[0] == R```\n\nAn `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`\nhaving a first `sparse_indices` column taking values between `[0, N)`, where\nthe minibatch size `N == sparse_shape[0]`.\n\nThe input `SparseTensor` must have rank `R` greater than 1, and the first\ndimension is treated as the minibatch dimension.  Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension.  The stored\n`SparseTensor` objects pointed to by each row of the output `sparse_handles`\nwill have rank `R-1`.\n\nThe `SparseTensor` values can then be read out as part of a minibatch by passing\nthe given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure\nthe correct `SparseTensorsMap` is accessed, ensure that the same\n`container` and `shared_name` are passed to that Op.  If no `shared_name`\nis provided here, instead use the *name* of the Operation created by calling\n`AddManySparseToTensorsMap` as the `shared_name` passed to\n`TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated."
  is_stateful: true
}
op {
  name: "AddN"
  input_arg {
    name: "inputs"
    description: "Must all be the same size and shape."
    type_attr: "T"
    number_attr: "N"
  }
  output_arg {
    name: "sum"
    type_attr: "T"
  }
  attr {
    name: "N"
    type: "int"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
        type: DT_VARIANT
      }
    }
  }
  summary: "Add all input tensors element wise."
  is_aggregate: true
  is_commutative: true
}
op {
  name: "AddSparseToTensorsMap"
  input_arg {
    name: "sparse_indices"
    description: "2-D.  The `indices` of the `SparseTensor`."
    type: DT_INT64
  }
  input_arg {
    name: "sparse_values"
    description: "1-D.  The `values` of the `SparseTensor`."
    type_attr: "T"
  }
  input_arg {
    name: "sparse_shape"
    description: "1-D.  The `shape` of the `SparseTensor`."
    type: DT_INT64
  }
  output_arg {
    name: "sparse_handle"
    description: "0-D.  The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`."
    type: DT_INT64
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "The container name for the `SparseTensorsMap` created by this op."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used."
  }
  summary: "Add a `SparseTensor` to a `SparseTensorsMap` return its handle."
  description: "A `SparseTensor` is represented by three tensors: `sparse_indices`,\n`sparse_values`, and `sparse_shape`.\n\nThis operator takes the given `SparseTensor` and adds it to a container\nobject (a `SparseTensorsMap`).  A unique key within this container is generated\nin the form of an `int64`, and this is the value that is returned.\n\nThe `SparseTensor` can then be read out as part of a minibatch by passing\nthe key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure\nthe correct `SparseTensorsMap` is accessed, ensure that the same\n`container` and `shared_name` are passed to that Op.  If no `shared_name`\nis provided here, instead use the *name* of the Operation created by calling\n`AddSparseToTensorsMap` as the `shared_name` passed to\n`TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated."
  is_stateful: true
}
op {
  name: "AddV2"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_UINT8
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Returns x + y element-wise."
  description: "*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)"
  is_aggregate: true
  is_commutative: true
}
op {
  name: "AdjustContrast"
  input_arg {
    name: "images"
    type_attr: "T"
  }
  input_arg {
    name: "contrast_factor"
    type: DT_FLOAT
  }
  input_arg {
    name: "min_value"
    type: DT_FLOAT
  }
  input_arg {
    name: "max_value"
    type: DT_FLOAT
  }
  output_arg {
    name: "output"
    type: DT_FLOAT
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_UINT8
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Deprecated. Disallowed in GraphDef version >= 2."
  deprecation {
    version: 2
    explanation: "Use AdjustContrastv2 instead"
  }
}
op {
  name: "AdjustContrastv2"
  input_arg {
    name: "images"
    description: "Images to adjust.  At least 3-D."
    type: DT_FLOAT
  }
  input_arg {
    name: "contrast_factor"
    description: "A float multiplier for adjusting contrast."
    type: DT_FLOAT
  }
  output_arg {
    name: "output"
    description: "The contrast-adjusted image or images."
    type: DT_FLOAT
  }
  summary: "Adjust the contrast of one or more images."
  description: "`images` is a tensor of at least 3 dimensions.  The last 3 dimensions are\ninterpreted as `[height, width, channels]`.  The other dimensions only\nrepresent a collection of images, such as `[batch, height, width, channels].`\n\nContrast is adjusted independently for each channel of each image.\n\nFor each channel, the Op first computes the mean of the image pixels in the\nchannel and then adjusts each component of each pixel to\n`(x - mean) * contrast_factor + mean`."
}
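The per-channel formula `(x - mean) * contrast_factor + mean` can be checked directly against the public wrapper; a minimal sketch with a single-channel image:

```
import tensorflow as tf

img = tf.constant([[[1.0], [3.0]],
                   [[5.0], [7.0]]])  # HWC, one channel; channel mean is 4.0
factor = 2.0

out = tf.image.adjust_contrast(img, factor)
ref = (img - tf.reduce_mean(img)) * factor + tf.reduce_mean(img)

print(out.numpy().ravel())  # [-2.  2.  6. 10.]
print(ref.numpy().ravel())  # same values, per the formula above
```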
op {
  name: "AdjustHue"
  input_arg {
    name: "images"
    description: "Images to adjust.  At least 3-D."
    type: DT_FLOAT
  }
  input_arg {
    name: "delta"
    description: "A float delta to add to the hue."
    type: DT_FLOAT
  }
  output_arg {
    name: "output"
    description: "The hue-adjusted image or images."
    type: DT_FLOAT
  }
  summary: "Adjust the hue of one or more images."
  description: "`images` is a tensor of at least 3 dimensions.  The last dimension is\ninterpretted as channels, and must be three.\n\nThe input image is considered in the RGB colorspace. Conceptually, the RGB\ncolors are first mapped into HSV. A delta is then applied all the hue values,\nand then remapped back to RGB colorspace."
}
op {
  name: "AdjustSaturation"
  input_arg {
    name: "images"
    description: "Images to adjust.  At least 3-D."
    type: DT_FLOAT
  }
  input_arg {
    name: "scale"
    description: "A float scale to add to the saturation."
    type: DT_FLOAT
  }
  output_arg {
    name: "output"
    description: "The hue-adjusted image or images."
    type: DT_FLOAT
  }
  summary: "Adjust the saturation of one or more images."
  description: "`images` is a tensor of at least 3 dimensions.  The last dimension is\ninterpretted as channels, and must be three.\n\nThe input image is considered in the RGB colorspace. Conceptually, the RGB\ncolors are first mapped into HSV. A scale is then applied all the saturation\nvalues, and then remapped back to RGB colorspace."
}
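Both `AdjustHue` and `AdjustSaturation` follow the same RGB -> HSV -> adjust -> RGB recipe described above; a minimal sketch via the `tf.image` wrappers:

```
import tensorflow as tf

rgb = tf.constant([[[[0.8, 0.2, 0.2]]]])  # one reddish pixel, NHWC

hue_shifted = tf.image.adjust_hue(rgb, delta=1.0 / 3.0)  # rotate hue by 120 degrees
gray = tf.image.adjust_saturation(rgb, 0.0)              # scale 0 removes saturation

print(hue_shifted.numpy().ravel())  # roughly [0.2 0.8 0.2]: red -> green
print(gray.numpy().ravel())         # all channels equal
```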
op {
  name: "All"
  input_arg {
    name: "input"
    description: "The tensor to reduce."
    type: DT_BOOL
  }
  input_arg {
    name: "reduction_indices"
    description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`."
    type_attr: "Tidx"
  }
  output_arg {
    name: "output"
    description: "The reduced tensor."
    type: DT_BOOL
  }
  attr {
    name: "keep_dims"
    type: "bool"
    default_value {
      b: false
    }
    description: "If true, retain reduced dimensions with length 1."
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Computes the \"logical and\" of elements across dimensions of a tensor."
  description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1."
}
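`All` is exposed as `tf.reduce_all` (and `Any`, defined below, as `tf.reduce_any`); a minimal sketch of the `reduction_indices`/`keep_dims` behaviour:

```
import tensorflow as tf

x = tf.constant([[True, True],
                 [True, False]])

print(tf.reduce_all(x).numpy())          # False: reduce over all dimensions
print(tf.reduce_all(x, axis=1).numpy())  # [ True False]
print(tf.reduce_all(x, axis=1, keepdims=True).numpy().shape)  # (2, 1)
```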
op {
  name: "AllCandidateSampler"
  input_arg {
    name: "true_classes"
    description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label."
    type: DT_INT64
  }
  output_arg {
    name: "sampled_candidates"
    description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate."
    type: DT_INT64
  }
  output_arg {
    name: "true_expected_count"
    description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability."
    type: DT_FLOAT
  }
  output_arg {
    name: "sampled_expected_count"
    description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates.  If unique=true, then this is a\nprobability."
    type: DT_FLOAT
  }
  attr {
    name: "num_true"
    type: "int"
    description: "Number of true labels per context."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "num_sampled"
    type: "int"
    description: "Number of candidates to produce."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "unique"
    type: "bool"
    description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities."
  }
  attr {
    name: "seed"
    type: "int"
    default_value {
      i: 0
    }
    description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
  }
  attr {
    name: "seed2"
    type: "int"
    default_value {
      i: 0
    }
    description: "An second seed to avoid seed collision."
  }
  summary: "Generates labels for candidate sampling with a learned unigram distribution."
  description: "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels."
  is_stateful: true
}
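A minimal sketch of the candidate-sampling interface via `tf.random.all_candidate_sampler` (this sampler deterministically emits the classes `[0, num_sampled)`, which makes its outputs easy to inspect):

```
import tensorflow as tf

true_classes = tf.constant([[0], [2]], dtype=tf.int64)  # batch_size=2, num_true=1

sampled, true_expected, sampled_expected = tf.random.all_candidate_sampler(
    true_classes=true_classes, num_true=1, num_sampled=4, unique=True, seed=42)

print(sampled.numpy())        # [0 1 2 3]
print(true_expected.numpy())  # expected counts; probabilities when unique=True
print(sampled_expected.numpy())
```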
op {
  name: "Angle"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "Tout"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_COMPLEX64
    }
    allowed_values {
      list {
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  attr {
    name: "Tout"
    type: "type"
    default_value {
      type: DT_FLOAT
    }
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Returns the argument of a complex number."
  description: "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the argument of each element in `input`. All elements in\n`input` must be complex numbers of the form \\\\(a + bj\\\\), where *a*\nis the real part and *b* is the imaginary part.\n\nThe argument returned by this operation is of the form \\\\(atan2(b, a)\\\\).\n\nFor example:\n\n```\n# tensor \'input\' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.angle(input) ==> [2.0132, 1.056]\n```\n\n@compatibility(numpy)\nEquivalent to np.angle.\n@end_compatibility"
}
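The documented example maps directly onto `tf.math.angle`; a minimal sketch:

```
import tensorflow as tf

z = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
print(tf.math.angle(z).numpy())  # [2.0132 1.056 ], i.e. atan2(b, a) per element
```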
op {
  name: "Any"
  input_arg {
    name: "input"
    description: "The tensor to reduce."
    type: DT_BOOL
  }
  input_arg {
    name: "reduction_indices"
    description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`."
    type_attr: "Tidx"
  }
  output_arg {
    name: "output"
    description: "The reduced tensor."
    type: DT_BOOL
  }
  attr {
    name: "keep_dims"
    type: "bool"
    default_value {
      b: false
    }
    description: "If true, retain reduced dimensions with length 1."
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Computes the \"logical or\" of elements across dimensions of a tensor."
  description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1."
}
op {
  name: "ApplyAdadelta"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum_update"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "rho"
    description: "Decay factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "epsilon"
    description: "Constant factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'*var\' according to the adadelta scheme."
  description: "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;"
}
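A NumPy transcription of the Adadelta update above; a sketch, not the TF kernel (the `lr` input scales the final step, which the pseudocode folds into `update`):

```
import numpy as np

def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad):
    # accum and accum_update are the two per-parameter Variable slots.
    accum[:] = rho * accum + (1 - rho) * grad ** 2
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    accum_update[:] = rho * accum_update + (1 - rho) * update ** 2
    var[:] -= lr * update
    return var
```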
op {
  name: "ApplyAdagrad"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  summary: "Update \'*var\' according to the adagrad scheme."
  description: "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))"
}
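The two-line Adagrad rule above, as a NumPy sketch:

```
import numpy as np

def apply_adagrad(var, accum, lr, grad):
    accum[:] += grad ** 2                 # accum += grad * grad
    var[:] -= lr * grad / np.sqrt(accum)  # var -= lr * grad * (1 / sqrt(accum))
    return var

var = np.array([1.0, 2.0])
accum = np.full(2, 0.1)  # typically initialized to a small positive value
apply_adagrad(var, accum, lr=0.1, grad=np.array([0.5, -0.5]))
```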
op {
  name: "ApplyAdagradDA"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "gradient_accumulator"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "gradient_squared_accumulator"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l1"
    description: "L1 regularization. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l2"
    description: "L2 regularization. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "global_step"
    description: "Training step number. Must be a scalar."
    type: DT_INT64
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'*var\' according to the proximal adagrad scheme."
}
op {
  name: "ApplyAdam"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "m"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "v"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "beta1_power"
    description: "Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "beta2_power"
    description: "Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "beta1"
    description: "Momentum factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "beta2"
    description: "Momentum factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "epsilon"
    description: "Ridge term. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  attr {
    name: "use_nesterov"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, uses the nesterov update."
  }
  summary: "Update \'*var\' according to the Adam algorithm."
  description: "lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)\nm_t <- beta1 * m_{t-1} + (1 - beta1) * g_t\nv_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t\nvariable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)"
}
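A NumPy transcription of the Adam update above; a sketch, where `beta1_power`/`beta2_power` correspond to `beta1**t`/`beta2**t` at step `t`:

```
import numpy as np

def apply_adam(var, m, v, t, lr, beta1, beta2, epsilon, grad):
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    m[:] = beta1 * m + (1 - beta1) * grad        # first-moment estimate
    v[:] = beta2 * v + (1 - beta2) * grad ** 2   # second-moment estimate
    var[:] -= lr_t * m / (np.sqrt(v) + epsilon)
    return var
```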
op {
  name: "ApplyCenteredRMSProp"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "mg"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "ms"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "mom"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "rho"
    description: "Decay rate. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "momentum"
    type_attr: "T"
  }
  input_arg {
    name: "epsilon"
    description: "Ridge term. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  summary: "Update \'*var\' according to the centered RMSProp algorithm."
  description: "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom"
}
op {
  name: "ApplyFtrl"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "linear"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l1"
    description: "L1 regulariation. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l2"
    description: "L2 regulariation. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "lr_power"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  summary: "Update \'*var\' according to the Ftrl-proximal scheme."
  description: "accum_new = accum + grad * grad\nlinear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new"
}
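A NumPy transcription of the FTRL-proximal update above; a sketch (note `lr_power` is usually negative, e.g. -0.5, and `accum` must start positive):

```
import numpy as np

def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power):
    accum_new = accum + grad ** 2
    linear += grad + (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2 * l2
    # Soft-threshold: the weight is zeroed unless |linear| exceeds l1.
    var[:] = np.where(np.abs(linear) > l1,
                      (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    accum[:] = accum_new
    return var
```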
op {
  name: "ApplyFtrlV2"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "linear"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l1"
    description: "L1 regulariation. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l2"
    description: "L2 shrinkage regulariation. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l2_shrinkage"
    type_attr: "T"
  }
  input_arg {
    name: "lr_power"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  summary: "Update \'*var\' according to the Ftrl-proximal scheme."
  description: "grad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage +\n    (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new"
}
op {
  name: "ApplyGradientDescent"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "alpha"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "delta"
    description: "The change."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
}
op {
  name: "ApplyMomentum"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  input_arg {
    name: "momentum"
    description: "Momentum. Must be a scalar."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  attr {
    name: "use_nesterov"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum."
  }
  summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
  description: "want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum"
}
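A NumPy sketch of the momentum rule above; the Nesterov branch follows the commonly used look-ahead formulation, which is an assumption rather than a transcription of this file:

```
import numpy as np

def apply_momentum(var, accum, lr, grad, momentum, use_nesterov=False):
    accum[:] = accum * momentum + grad
    if use_nesterov:
        var[:] -= lr * grad + lr * momentum * accum  # look-ahead step (assumed form)
    else:
        var[:] -= lr * accum
    return var
```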
op {
  name: "ApplyProximalAdagrad"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "accum"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l1"
    description: "L1 regularization. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l2"
    description: "L2 regularization. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
  description: "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}"
}
op {
  name: "ApplyProximalGradientDescent"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "alpha"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l1"
    description: "L1 regularization. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "l2"
    description: "L2 regularization. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "delta"
    description: "The change."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
  description: "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}"
}
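The FOBOS step above is a plain soft-thresholding update; a NumPy sketch:

```
import numpy as np

def apply_proximal_gd(var, alpha, l1, l2, delta):
    prox_v = var - alpha * delta
    # var = sign(prox_v) / (1 + alpha*l2) * max{|prox_v| - alpha*l1, 0}
    var[:] = np.sign(prox_v) / (1 + alpha * l2) * np.maximum(
        np.abs(prox_v) - alpha * l1, 0.0)
    return var
```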
op {
  name: "ApplyRMSProp"
  input_arg {
    name: "var"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "ms"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "mom"
    description: "Should be from a Variable()."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "lr"
    description: "Scaling factor. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "rho"
    description: "Decay rate. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "momentum"
    type_attr: "T"
  }
  input_arg {
    name: "epsilon"
    description: "Ridge term. Must be a scalar."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "The gradient."
    type_attr: "T"
  }
  output_arg {
    name: "out"
    description: "Same as \"var\"."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
  }
  summary: "Update \'*var\' according to the RMSProp algorithm."
  description: "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom"
}
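A NumPy transcription of the RMSProp update above; a sketch, not the TF kernel:

```
import numpy as np

def apply_rmsprop(var, ms, mom, lr, rho, momentum, epsilon, grad):
    ms[:] = rho * ms + (1 - rho) * grad ** 2  # running mean of squared gradients
    mom[:] = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
    var[:] -= mom
    return var
```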
op {
  name: "ApproximateEqual"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type: DT_BOOL
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "tolerance"
    type: "float"
    default_value {
      f: 1e-05
    }
  }
  summary: "Returns the truth value of abs(x-y) < tolerance element-wise."
  is_commutative: true
}
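A minimal sketch of the tolerance check via `tf.math.approximate_equal`:

```
import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = tf.constant([1.000001, 2.1])
print(tf.math.approximate_equal(x, y, tolerance=1e-5).numpy())  # [ True False]
```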
op {
  name: "ArgMax"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  input_arg {
    name: "dimension"
    description: "int32 or int64, must be in the range `[-rank(input), rank(input))`.\nDescribes which dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0."
    type_attr: "Tidx"
  }
  output_arg {
    name: "output"
    type_attr: "output_type"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  attr {
    name: "output_type"
    type: "type"
    default_value {
      type: DT_INT64
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Returns the index with the largest value across dimensions of a tensor."
  description: "Note that in case of ties the identity of the return value is not guaranteed."
}
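A minimal sketch of `ArgMax` through `tf.argmax` (note `output_type` defaults to int64; `ArgMin`, defined next, behaves symmetrically):

```
import tensorflow as tf

x = tf.constant([[10, 30, 20],
                 [60, 40, 50]])

print(tf.argmax(x, axis=1).numpy())  # [1 0], dtype int64 by default
print(tf.argmax(x, axis=0).numpy())  # [1 1 1]
```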
op {
  name: "ArgMin"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  input_arg {
    name: "dimension"
    description: "int32 or int64, must be in the range `[-rank(input), rank(input))`.\nDescribes which dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0."
    type_attr: "Tidx"
  }
  output_arg {
    name: "output"
    type_attr: "output_type"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  attr {
    name: "output_type"
    type: "type"
    default_value {
      type: DT_INT64
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Returns the index with the smallest value across dimensions of a tensor."
  description: "Note that in case of ties the identity of the return value is not guaranteed."
}
op {
  name: "AsString"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type: DT_STRING
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_BOOL
        type: DT_INT8
      }
    }
  }
  attr {
    name: "precision"
    type: "int"
    default_value {
      i: -1
    }
    description: "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1."
  }
  attr {
    name: "scientific"
    type: "bool"
    default_value {
      b: false
    }
    description: "Use scientific notation for floating point numbers."
  }
  attr {
    name: "shortest"
    type: "bool"
    default_value {
      b: false
    }
    description: "Use shortest representation (either scientific or standard) for\nfloating point numbers."
  }
  attr {
    name: "width"
    type: "int"
    default_value {
      i: -1
    }
    description: "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1."
  }
  attr {
    name: "fill"
    type: "string"
    default_value {
      s: ""
    }
    description: "The value to pad if width > -1.  If empty, pads with spaces.\nAnother typical value is \'0\'.  String cannot be longer than 1 character."
  }
  summary: "Converts each entry in the given tensor to strings.  Supports many numeric"
  description: "types and boolean."
}
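The formatting attrs above map onto keyword arguments of `tf.strings.as_string`; a minimal sketch:

```
import tensorflow as tf

x = tf.constant([3.14159, 2.5])
print(tf.strings.as_string(x, precision=2).numpy())      # [b'3.14' b'2.50']
print(tf.strings.as_string(x, scientific=True).numpy())  # scientific notation
print(tf.strings.as_string(tf.constant([7]), width=4, fill='0').numpy())  # [b'0007']
```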
op {
  name: "Asin"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes asin of x element-wise."
}
op {
  name: "Asinh"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes inverse hyperbolic sine of x element-wise."
}
op {
  name: "Assert"
  input_arg {
    name: "condition"
    description: "The condition to evaluate."
    type: DT_BOOL
  }
  input_arg {
    name: "data"
    description: "The tensors to print out when condition is false."
    type_list_attr: "T"
  }
  attr {
    name: "T"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "summarize"
    type: "int"
    default_value {
      i: 3
    }
    description: "Print this many entries of each tensor."
  }
  summary: "Asserts that the given condition is true."
  description: "If `condition` evaluates to false, print the list of tensors in `data`.\n`summarize` determines how many entries of the tensors to print."
  is_stateful: true
}
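A minimal sketch via `tf.debugging.Assert`; if the condition is false it raises and prints up to `summarize` entries of each tensor in `data`:

```
import tensorflow as tf

x = tf.constant([2.0, 3.0])
tf.debugging.Assert(tf.reduce_all(x > 0), data=[x], summarize=3)  # passes silently
```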
op {
  name: "Assign"
  input_arg {
    name: "ref"
    description: "Should be from a `Variable` node. May be uninitialized."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "value"
    description: "The value to be assigned to the variable."
    type_attr: "T"
  }
  output_arg {
    name: "output_ref"
    description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been reset."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "validate_shape"
    type: "bool"
    default_value {
      b: true
    }
    description: "If true, the operation will validate that the shape\nof \'value\' matches the shape of the Tensor being assigned to.  If false,\n\'ref\' will take on the shape of \'value\'."
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: true
    }
    description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'ref\' by assigning \'value\' to it."
  description: "This operation outputs \"ref\" after the assignment is done.\nThis makes it easier to chain operations that need to use the reset value."
  allows_uninitialized_input: true
}
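Because `Assign` returns `output_ref`, further ref ops can chain on the freshly assigned value; a minimal graph-mode sketch, assuming the TF1 `tf.compat.v1` ref-variable API:

```
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

v = tf.compat.v1.Variable([1.0, 2.0])
assigned = tf.compat.v1.assign(v, [10.0, 20.0])         # Assign; returns the ref
bumped = tf.compat.v1.assign_add(assigned, [1.0, 1.0])  # AssignAdd chained on it

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    print(sess.run(bumped))  # [11. 21.]
```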
op {
  name: "AssignAdd"
  input_arg {
    name: "ref"
    description: "Should be from a `Variable` node."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "value"
    description: "The value to be added to the variable."
    type_attr: "T"
  }
  output_arg {
    name: "output_ref"
    description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'ref\' by adding \'value\' to it."
  description: "This operation outputs \"ref\" after the update is done.\nThis makes it easier to chain operations that need to use the reset value."
}
op {
  name: "AssignAddVariableOp"
  input_arg {
    name: "resource"
    description: "handle to the resource in which to store the variable."
    type: DT_RESOURCE
  }
  input_arg {
    name: "value"
    description: "the value by which the variable will be incremented."
    type_attr: "dtype"
  }
  attr {
    name: "dtype"
    type: "type"
    description: "the dtype of the value."
  }
  summary: "Adds a value to the current value of a variable."
  description: "Any ReadVariableOp which depends directly or indirectly on this assign is\nguaranteed to see the incremented value or a subsequent newer one.\n\nOutputs the incremented value, which can be used to totally order the\nincrements to this variable."
  is_stateful: true
}
op {
  name: "AssignSub"
  input_arg {
    name: "ref"
    description: "Should be from a `Variable` node."
    type_attr: "T"
    is_ref: true
  }
  input_arg {
    name: "value"
    description: "The value to be subtracted to the variable."
    type_attr: "T"
  }
  output_arg {
    name: "output_ref"
    description: "= Same as \"ref\".  Returned as a convenience for operations that want\nto use the new value after the variable has been updated."
    type_attr: "T"
    is_ref: true
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "use_locking"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention."
  }
  summary: "Update \'ref\' by subtracting \'value\' from it."
  description: "This operation outputs \"ref\" after the update is done.\nThis makes it easier to chain operations that need to use the reset value."
}
op {
  name: "AssignSubVariableOp"
  input_arg {
    name: "resource"
    description: "handle to the resource in which to store the variable."
    type: DT_RESOURCE
  }
  input_arg {
    name: "value"
    description: "the value by which the variable will be incremented."
    type_attr: "dtype"
  }
  attr {
    name: "dtype"
    type: "type"
    description: "the dtype of the value."
  }
  summary: "Subtracts a value from the current value of a variable."
  description: "Any ReadVariableOp which depends directly or indirectly on this assign is\nguaranteed to see the incremented value or a subsequent newer one.\n\nOutputs the incremented value, which can be used to totally order the\nincrements to this variable."
  is_stateful: true
}
op {
  name: "AssignVariableOp"
  input_arg {
    name: "resource"
    description: "handle to the resource in which to store the variable."
    type: DT_RESOURCE
  }
  input_arg {
    name: "value"
    description: "the value to set the new tensor to use."
    type_attr: "dtype"
  }
  attr {
    name: "dtype"
    type: "type"
    description: "the dtype of the value."
  }
  summary: "Assigns a new value to a variable."
  description: "Any ReadVariableOp with a control dependency on this op is guaranteed to return\nthis value or a subsequent newer value of the variable."
  is_stateful: true
}
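A rough TF 1.x sketch covering the three resource-variable ops above. That `assign_add`/`assign_sub` lower to AssignAddVariableOp/AssignSubVariableOp follows from the documented behavior; the `use_resource=True` plumbing is an assumption about the Python wrapper of this era:

```
import tensorflow as tf  # sketch, assuming TF 1.x resource variables

v = tf.get_variable("v", initializer=0.0, use_resource=True)
inc = v.assign_add(1.0)   # lowers to AssignAddVariableOp
dec = v.assign_sub(0.5)   # lowers to AssignSubVariableOp

# A ReadVariableOp with control dependencies on both updates is guaranteed
# to observe them, per the ordering guarantee described above.
with tf.control_dependencies([inc, dec]):
    read = v.read_value()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(read))  # 0.5
```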
op {
  name: "Atan"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes atan of x element-wise."
}
op {
  name: "Atan2"
  input_arg {
    name: "y"
    type_attr: "T"
  }
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Computes arctangent of `y/x` element-wise, respecting signs of the arguments."
  description: "This is the angle \\( \\theta \\in [-\\pi, \\pi] \\) such that\n\\[ x = r \\cos(\\theta) \\]\nand\n\\[ y = r \\sin(\\theta) \\]\nwhere \\(r = \\sqrt(x^2 + y^2) \\)."
}
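A NumPy analogue that checks the defining relations from the description (a sketch; `np.arctan2` uses the same sign-aware convention as Atan2):

```
import numpy as np

y = np.array([1.0, -1.0, 0.0])
x = np.array([-1.0, -1.0, -2.0])
theta = np.arctan2(y, x)        # angle in [-pi, pi], respecting signs
r = np.sqrt(x**2 + y**2)
# Verify x = r*cos(theta) and y = r*sin(theta):
assert np.allclose(x, r * np.cos(theta))
assert np.allclose(y, r * np.sin(theta))
```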
op {
  name: "Atanh"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes inverse hyperbolic tangent of x element-wise."
}
op {
  name: "AudioSpectrogram"
  input_arg {
    name: "input"
    description: "Float representation of audio data."
    type: DT_FLOAT
  }
  output_arg {
    name: "spectrogram"
    description: "3D representation of the audio frequencies as an image."
    type: DT_FLOAT
  }
  attr {
    name: "window_size"
    type: "int"
    description: "How wide the input window is in samples. For the highest efficiency\nthis should be a power of two, but other values are accepted."
  }
  attr {
    name: "stride"
    type: "int"
    description: "How widely apart the center of adjacent sample windows should be."
  }
  attr {
    name: "magnitude_squared"
    type: "bool"
    default_value {
      b: false
    }
    description: "Whether to return the squared magnitude or just the\nmagnitude. Using squared magnitude can avoid extra calculations."
  }
  summary: "Produces a visualization of audio data over time."
  description: "Spectrograms are a standard way of representing audio information as a series of\nslices of frequency information, one slice for each window of time. By joining\nthese together into a sequence, they form a distinctive fingerprint of the sound\nover time.\n\nThis op expects to receive audio data as an input, stored as floats in the range\n-1 to 1, together with a window width in samples, and a stride specifying how\nfar to move the window between slices. From this it generates a three\ndimensional output. The lowest dimension has an amplitude value for each\nfrequency during that time slice. The next dimension is time, with successive\nfrequency slices. The final dimension is for the channels in the input, so a\nstereo audio input would have two here for example.\n\nThis means the layout when converted and saved as an image is rotated 90 degrees\nclockwise from a typical spectrogram. Time is descending down the Y axis, and\nthe frequency decreases from left to right.\n\nEach value in the result represents the square root of the sum of the real and\nimaginary parts of an FFT on the current window of samples. In this way, the\nlowest dimension represents the power of each frequency in the current window,\nand adjacent windows are concatenated in the next dimension.\n\nTo get a more intuitive and visual look at what this operation does, you can run\ntensorflow/examples/wav_to_spectrogram to read in an audio file and save out the\nresulting spectrogram as a PNG image."
}
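As a back-of-the-envelope check on the output layout described above, a small shape calculator (the framing arithmetic here is an assumption about the kernel, not quoted from it):

```
# Rough output-shape sketch for AudioSpectrogram; the exact framing rule is
# an assumption, not taken from the kernel implementation.
def spectrogram_shape(num_samples, channels, window_size, stride):
    frames = 1 + (num_samples - window_size) // stride
    bins = window_size // 2 + 1  # one-sided FFT bins for real-valued input
    return (channels, frames, bins)

print(spectrogram_shape(16000, 1, window_size=512, stride=256))  # (1, 61, 257)
```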
op {
  name: "AudioSummary"
  input_arg {
    name: "tag"
    description: "Scalar. Used to build the `tag` attribute of the summary values."
    type: DT_STRING
  }
  input_arg {
    name: "tensor"
    description: "2-D of shape `[batch_size, frames]`."
    type: DT_FLOAT
  }
  output_arg {
    name: "summary"
    description: "Scalar. Serialized `Summary` protocol buffer."
    type: DT_STRING
  }
  attr {
    name: "sample_rate"
    type: "float"
    description: "The sample rate of the signal in hertz."
  }
  attr {
    name: "max_outputs"
    type: "int"
    default_value {
      i: 3
    }
    description: "Max number of batch elements to generate audio for."
    has_minimum: true
    minimum: 1
  }
  summary: "Outputs a `Summary` protocol buffer with audio."
  description: "The summary has up to `max_outputs` summary values containing audio. The\naudio is built from `tensor` which must be 3-D with shape `[batch_size,\nframes, channels]` or 2-D with shape `[batch_size, frames]`. The values are\nassumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\nThe `tag` argument is a scalar `Tensor` of type `string`.  It is used to\nbuild the `tag` of the summary values:\n\n*  If `max_outputs` is 1, the summary value tag is \'*tag*/audio\'.\n*  If `max_outputs` is greater than 1, the summary value tags are\n   generated sequentially as \'*tag*/audio/0\', \'*tag*/audio/1\', etc."
  deprecation {
    version: 15
    explanation: "Use AudioSummaryV2."
  }
}
op {
  name: "AudioSummaryV2"
  input_arg {
    name: "tag"
    description: "Scalar. Used to build the `tag` attribute of the summary values."
    type: DT_STRING
  }
  input_arg {
    name: "tensor"
    description: "2-D of shape `[batch_size, frames]`."
    type: DT_FLOAT
  }
  input_arg {
    name: "sample_rate"
    description: "The sample rate of the signal in hertz."
    type: DT_FLOAT
  }
  output_arg {
    name: "summary"
    description: "Scalar. Serialized `Summary` protocol buffer."
    type: DT_STRING
  }
  attr {
    name: "max_outputs"
    type: "int"
    default_value {
      i: 3
    }
    description: "Max number of batch elements to generate audio for."
    has_minimum: true
    minimum: 1
  }
  summary: "Outputs a `Summary` protocol buffer with audio."
  description: "The summary has up to `max_outputs` summary values containing audio. The\naudio is built from `tensor` which must be 3-D with shape `[batch_size,\nframes, channels]` or 2-D with shape `[batch_size, frames]`. The values are\nassumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\nThe `tag` argument is a scalar `Tensor` of type `string`.  It is used to\nbuild the `tag` of the summary values:\n\n*  If `max_outputs` is 1, the summary value tag is \'*tag*/audio\'.\n*  If `max_outputs` is greater than 1, the summary value tags are\n   generated sequentially as \'*tag*/audio/0\', \'*tag*/audio/1\', etc."
}
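The usual entry point for these summary ops in the TF 1.x Python API is `tf.summary.audio` (a sketch; the placeholder shapes are illustrative):

```
import tensorflow as tf  # sketch, TF 1.x summary API

# [batch, frames, channels]; values expected in [-1.0, 1.0].
waveform = tf.placeholder(tf.float32, [None, 16000, 1])
summary = tf.summary.audio("speech", waveform, sample_rate=16000, max_outputs=3)
```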
op {
  name: "AvgPool"
  input_arg {
    name: "value"
    description: "4-D with shape `[batch, height, width, channels]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "The average pooled output tensor."
    type_attr: "T"
  }
  attr {
    name: "ksize"
    type: "list(int)"
    description: "The size of the sliding window for each dimension of `value`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of `value`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Performs average pooling on the input."
  description: "Each entry in `output` is the mean of the corresponding size `ksize`\nwindow in `value`."
}
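A small TF 1.x usage sketch: each output entry is the mean of the corresponding `ksize` window (the input values are illustrative):

```
import tensorflow as tf  # sketch, TF 1.x API

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
pooled = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding="VALID", data_format="NHWC")
with tf.Session() as sess:
    # Each entry is the mean of a 2x2 window:
    print(sess.run(pooled)[0, :, :, 0])  # [[ 2.5  4.5] [10.5 12.5]]
```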
op {
  name: "AvgPool3D"
  input_arg {
    name: "input"
    description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "The average pooled output tensor."
    type_attr: "T"
  }
  attr {
    name: "ksize"
    type: "list(int)"
    description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NDHWC"
    }
    description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
    allowed_values {
      list {
        s: "NDHWC"
        s: "NCDHW"
      }
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Performs 3D average pooling on the input."
}
op {
  name: "AvgPool3DGrad"
  input_arg {
    name: "orig_input_shape"
    description: "The original input dimensions."
    type: DT_INT32
  }
  input_arg {
    name: "grad"
    description: "Output backprop of shape `[batch, depth, rows, cols, channels]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "The backprop for input."
    type_attr: "T"
  }
  attr {
    name: "ksize"
    type: "list(int)"
    description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NDHWC"
    }
    description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
    allowed_values {
      list {
        s: "NDHWC"
        s: "NCDHW"
      }
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Computes gradients of average pooling function."
}
op {
  name: "AvgPoolGrad"
  input_arg {
    name: "orig_input_shape"
    description: "1-D.  Shape of the original input to `avg_pool`."
    type: DT_INT32
  }
  input_arg {
    name: "grad"
    description: "4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.\nthe output of `avg_pool`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "4-D.  Gradients w.r.t. the input of `avg_pool`."
    type_attr: "T"
  }
  attr {
    name: "ksize"
    type: "list(int)"
    description: "The size of the sliding window for each dimension of the input."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of the input."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Computes gradients of the average pooling function."
}
op {
  name: "Barrier"
  output_arg {
    name: "handle"
    description: "The handle to the barrier."
    type: DT_STRING
    is_ref: true
  }
  attr {
    name: "component_types"
    type: "list(type)"
    description: "The type of each component in a value."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "shapes"
    type: "list(shape)"
    default_value {
      list {
      }
    }
    description: "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types."
    has_minimum: true
  }
  attr {
    name: "capacity"
    type: "int"
    default_value {
      i: -1
    }
    description: "The capacity of the barrier.  The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue."
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this barrier will be shared under the given name\nacross multiple sessions."
  }
  summary: "Defines a barrier that persists across different graph executions."
  description: "A barrier represents a key-value map, where each key is a string, and\neach value is a tuple of tensors.\n\nAt runtime, the barrier contains \'complete\' and \'incomplete\'\nelements. A complete element has defined tensors for all components of\nits value tuple, and may be accessed using BarrierTakeMany. An\nincomplete element has some undefined components in its value tuple,\nand may be updated using BarrierInsertMany."
  is_stateful: true
}
op {
  name: "BarrierClose"
  input_arg {
    name: "handle"
    description: "The handle to a barrier."
    type: DT_STRING
    is_ref: true
  }
  attr {
    name: "cancel_pending_enqueues"
    type: "bool"
    default_value {
      b: false
    }
    description: "If true, all pending enqueue requests that are\nblocked on the barrier\'s queue will be canceled. InsertMany will fail, even\nif no new key is introduced."
  }
  summary: "Closes the given barrier."
  description: "This operation signals that no more new elements will be inserted in the\ngiven barrier. Subsequent InsertMany that try to introduce a new key will fail.\nSubsequent InsertMany operations that just add missing components to already\nexisting elements will continue to succeed. Subsequent TakeMany operations will\ncontinue to succeed if sufficient completed elements remain in the barrier.\nSubsequent TakeMany operations that would block will fail immediately."
}
op {
  name: "BarrierIncompleteSize"
  input_arg {
    name: "handle"
    description: "The handle to a barrier."
    type: DT_STRING
    is_ref: true
  }
  output_arg {
    name: "size"
    description: "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier."
    type: DT_INT32
  }
  summary: "Computes the number of incomplete elements in the given barrier."
}
op {
  name: "BarrierInsertMany"
  input_arg {
    name: "handle"
    description: "The handle to a barrier."
    type: DT_STRING
    is_ref: true
  }
  input_arg {
    name: "keys"
    description: "A one-dimensional tensor of keys, with length n."
    type: DT_STRING
  }
  input_arg {
    name: "values"
    description: "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "component_index"
    type: "int"
    description: "The component of the barrier elements that is being assigned."
  }
  summary: "For each key, assigns the respective value to the specified component."
  description: "If a key is not found in the barrier, this operation will create a new\nincomplete element. If a key is found in the barrier, and the element\nalready has a value at component_index, this operation will fail with\nINVALID_ARGUMENT, and leave the barrier in an undefined state."
}
op {
  name: "BarrierReadySize"
  input_arg {
    name: "handle"
    description: "The handle to a barrier."
    type: DT_STRING
    is_ref: true
  }
  output_arg {
    name: "size"
    description: "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier."
    type: DT_INT32
  }
  summary: "Computes the number of complete elements in the given barrier."
}
op {
  name: "BarrierTakeMany"
  input_arg {
    name: "handle"
    description: "The handle to a barrier."
    type: DT_STRING
    is_ref: true
  }
  input_arg {
    name: "num_elements"
    description: "A single-element tensor containing the number of elements to\ntake."
    type: DT_INT32
  }
  output_arg {
    name: "indices"
    description: "A one-dimensional tensor of indices, with length num_elems.\nThese indices refer to the batch in which the values were placed into the\nbarrier (starting with MIN_LONG and increasing with each BarrierInsertMany)."
    type: DT_INT64
  }
  output_arg {
    name: "keys"
    description: "A one-dimensional tensor of keys, with length num_elements."
    type: DT_STRING
  }
  output_arg {
    name: "values"
    description: "One any-dimensional tensor per component in a barrier element. All\nvalues have length num_elements in the 0th dimension."
    type_list_attr: "component_types"
  }
  attr {
    name: "component_types"
    type: "list(type)"
    description: "The type of each component in a value."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "allow_small_batch"
    type: "bool"
    default_value {
      b: false
    }
    description: "Allow to return less than num_elements items if barrier is\nalready closed."
  }
  attr {
    name: "wait_for_incomplete"
    type: "bool"
    default_value {
      b: false
    }
  }
  attr {
    name: "timeout_ms"
    type: "int"
    default_value {
      i: -1
    }
    description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet."
  }
  summary: "Takes the given number of completed elements from a barrier."
  description: "This operation concatenates completed-element component tensors along\nthe 0th dimension to make a single component tensor.\n\nElements come out of the barrier when they are complete, and in the order\nin which they were placed into the barrier.  The indices output provides\ninformation about the batch in which each element was originally inserted\ninto the barrier."
}
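A sketch of the insert/take lifecycle across the barrier ops above. The Python wrapper lived in the non-public module `tensorflow.python.ops.data_flow_ops` in this generation of TensorFlow, so its exact location and surface are assumptions:

```
import tensorflow as tf
# Non-public wrapper; module path and API are assumptions for this TF era.
from tensorflow.python.ops import data_flow_ops

b = data_flow_ops.Barrier((tf.float32,), shapes=((),))
insert = b.insert_many(0, keys=["k0", "k1"], values=[1.0, 2.0])  # component 0
take = b.take_many(2)  # (indices, keys, per-component value tensors)

with tf.Session() as sess:
    sess.run(insert)
    indices, keys, values = sess.run(take)  # both elements are now complete
```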
op {
  name: "BatchCholesky"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use Cholesky instead."
  }
}
op {
  name: "BatchCholeskyGrad"
  input_arg {
    name: "l"
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use CholeskyGrad instead."
  }
}
op {
  name: "BatchDataset"
  input_arg {
    name: "input_dataset"
    type: DT_VARIANT
  }
  input_arg {
    name: "batch_size"
    description: "A scalar representing the number of elements to accumulate in a\nbatch."
    type: DT_INT64
  }
  output_arg {
    name: "handle"
    type: DT_VARIANT
  }
  attr {
    name: "output_types"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "output_shapes"
    type: "list(shape)"
    has_minimum: true
    minimum: 1
  }
  summary: "Creates a dataset that batches `batch_size` elements from `input_dataset`."
}
op {
  name: "BatchFFT"
  input_arg {
    name: "input"
    type: DT_COMPLEX64
  }
  output_arg {
    name: "output"
    type: DT_COMPLEX64
  }
  deprecation {
    version: 15
    explanation: "Use FFT"
  }
}
op {
  name: "BatchFFT2D"
  input_arg {
    name: "input"
    type: DT_COMPLEX64
  }
  output_arg {
    name: "output"
    type: DT_COMPLEX64
  }
  deprecation {
    version: 15
    explanation: "Use FFT2D"
  }
}
op {
  name: "BatchFFT3D"
  input_arg {
    name: "input"
    type: DT_COMPLEX64
  }
  output_arg {
    name: "output"
    type: DT_COMPLEX64
  }
  deprecation {
    version: 15
    explanation: "Use FFT3D"
  }
}
op {
  name: "BatchIFFT"
  input_arg {
    name: "input"
    type: DT_COMPLEX64
  }
  output_arg {
    name: "output"
    type: DT_COMPLEX64
  }
  deprecation {
    version: 15
    explanation: "Use IFFT"
  }
}
op {
  name: "BatchIFFT2D"
  input_arg {
    name: "input"
    type: DT_COMPLEX64
  }
  output_arg {
    name: "output"
    type: DT_COMPLEX64
  }
  deprecation {
    version: 15
    explanation: "Use IFFT2D"
  }
}
op {
  name: "BatchIFFT3D"
  input_arg {
    name: "input"
    type: DT_COMPLEX64
  }
  output_arg {
    name: "output"
    type: DT_COMPLEX64
  }
  deprecation {
    version: 15
    explanation: "Use IFFT3D"
  }
}
op {
  name: "BatchMatMul"
  input_arg {
    name: "x"
    description: "2-D or higher with shape `[..., r_x, c_x]`."
    type_attr: "T"
  }
  input_arg {
    name: "y"
    description: "2-D or higher with shape `[..., r_y, c_y]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "3-D or higher with shape `[..., r_o, c_o]`"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  attr {
    name: "adj_x"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, adjoint the slices of `x`. Defaults to `False`."
  }
  attr {
    name: "adj_y"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, adjoint the slices of `y`. Defaults to `False`."
  }
  summary: "Multiplies slices of two tensors in batches."
  description: "Multiplies all slices of `Tensor` `x` and `y` (each slice can be\nviewed as an element of a batch), and arranges the individual results\nin a single output tensor of the same batch size. Each of the\nindividual slices can optionally be adjointed (to adjoint a matrix\nmeans to transpose and conjugate it) before multiplication by setting\nthe `adj_x` or `adj_y` flag to `True`, which are by default `False`.\n\nThe input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`\nand `[..., r_y, c_y]`.\n\nThe output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:\n\n    r_o = c_x if adj_x else r_x\n    c_o = r_y if adj_y else c_y\n\nIt is computed as:\n\n    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])"
}
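The `r_o`/`c_o` rule above in NumPy form (`np.matmul` performs the same slice-wise product; the shapes are illustrative):

```
import numpy as np

x = np.random.rand(4, 2, 3)   # [..., r_x, c_x]
y = np.random.rand(4, 3, 5)   # [..., r_y, c_y]
out = np.matmul(x, y)         # slice k: out[k] = x[k] @ y[k]
print(out.shape)              # (4, 2, 5): r_o = r_x, c_o = c_y when adj_x = adj_y = False
```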
op {
  name: "BatchMatrixBandPart"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  input_arg {
    name: "num_lower"
    type: DT_INT64
  }
  input_arg {
    name: "num_upper"
    type: DT_INT64
  }
  output_arg {
    name: "band"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  deprecation {
    version: 14
    explanation: "Use MatrixBandPart"
  }
}
op {
  name: "BatchMatrixDeterminant"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use MatrixDeterminant instead."
  }
}
op {
  name: "BatchMatrixDiag"
  input_arg {
    name: "diagonal"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  deprecation {
    version: 14
    explanation: "Use MatrixDiag"
  }
}
op {
  name: "BatchMatrixDiagPart"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "diagonal"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  deprecation {
    version: 14
    explanation: "Use MatrixDiagPart"
  }
}
op {
  name: "BatchMatrixInverse"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "adjoint"
    type: "bool"
    default_value {
      b: false
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use MatrixInverse instead."
  }
}
op {
  name: "BatchMatrixSetDiag"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  input_arg {
    name: "diagonal"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  deprecation {
    version: 14
    explanation: "Use MatrixSetDiag"
  }
}
op {
  name: "BatchMatrixSolve"
  input_arg {
    name: "matrix"
    type_attr: "T"
  }
  input_arg {
    name: "rhs"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "adjoint"
    type: "bool"
    default_value {
      b: false
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use MatrixSolve instead."
  }
}
op {
  name: "BatchMatrixSolveLs"
  input_arg {
    name: "matrix"
    type_attr: "T"
  }
  input_arg {
    name: "rhs"
    type_attr: "T"
  }
  input_arg {
    name: "l2_regularizer"
    type: DT_DOUBLE
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  attr {
    name: "fast"
    type: "bool"
    default_value {
      b: true
    }
  }
  deprecation {
    version: 13
    explanation: "Use MatrixSolveLs instead."
  }
}
op {
  name: "BatchMatrixTriangularSolve"
  input_arg {
    name: "matrix"
    type_attr: "T"
  }
  input_arg {
    name: "rhs"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "lower"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "adjoint"
    type: "bool"
    default_value {
      b: false
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use MatrixTriangularSolve instead."
  }
}
op {
  name: "BatchNormWithGlobalNormalization"
  input_arg {
    name: "t"
    description: "A 4D input Tensor."
    type_attr: "T"
  }
  input_arg {
    name: "m"
    description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
    type_attr: "T"
  }
  input_arg {
    name: "v"
    description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
    type_attr: "T"
  }
  input_arg {
    name: "beta"
    description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor."
    type_attr: "T"
  }
  input_arg {
    name: "gamma"
    description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor."
    type_attr: "T"
  }
  output_arg {
    name: "result"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "variance_epsilon"
    type: "float"
    description: "A small float number to avoid dividing by 0."
  }
  attr {
    name: "scale_after_normalization"
    type: "bool"
    description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
  }
  summary: "Batch normalization."
  description: "This op is deprecated. Prefer `tf.nn.batch_normalization`."
  deprecation {
    version: 9
    explanation: "Use tf.nn.batch_normalization()"
  }
}
op {
  name: "BatchNormWithGlobalNormalizationGrad"
  input_arg {
    name: "t"
    description: "A 4D input Tensor."
    type_attr: "T"
  }
  input_arg {
    name: "m"
    description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof."
    type_attr: "T"
  }
  input_arg {
    name: "v"
    description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof."
    type_attr: "T"
  }
  input_arg {
    name: "gamma"
    description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor."
    type_attr: "T"
  }
  input_arg {
    name: "backprop"
    description: "4D backprop Tensor."
    type_attr: "T"
  }
  output_arg {
    name: "dx"
    description: "4D backprop tensor for input."
    type_attr: "T"
  }
  output_arg {
    name: "dm"
    description: "1D backprop tensor for mean."
    type_attr: "T"
  }
  output_arg {
    name: "dv"
    description: "1D backprop tensor for variance."
    type_attr: "T"
  }
  output_arg {
    name: "db"
    description: "1D backprop tensor for beta."
    type_attr: "T"
  }
  output_arg {
    name: "dg"
    description: "1D backprop tensor for gamma."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "variance_epsilon"
    type: "float"
    description: "A small float number to avoid dividing by 0."
  }
  attr {
    name: "scale_after_normalization"
    type: "bool"
    description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma."
  }
  summary: "Gradients for batch normalization."
  description: "This op is deprecated. See `tf.nn.batch_normalization`."
  deprecation {
    version: 9
    explanation: "Use tf.nn.batch_normalization()"
  }
}
op {
  name: "BatchSelfAdjointEig"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  deprecation {
    version: 11
    explanation: "Use SelfAdjointEigV2 instead."
  }
}
op {
  name: "BatchSelfAdjointEigV2"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "e"
    type_attr: "T"
  }
  output_arg {
    name: "v"
    type_attr: "T"
  }
  attr {
    name: "compute_v"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use SelfAdjointEigV2 instead."
  }
}
op {
  name: "BatchSvd"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "s"
    type_attr: "T"
  }
  output_arg {
    name: "u"
    type_attr: "T"
  }
  output_arg {
    name: "v"
    type_attr: "T"
  }
  attr {
    name: "compute_uv"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "full_matrices"
    type: "bool"
    default_value {
      b: false
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  deprecation {
    version: 13
    explanation: "Use Svd instead."
  }
}
op {
  name: "BatchToSpace"
  input_arg {
    name: "input"
    description: "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n  depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`."
    type_attr: "T"
  }
  input_arg {
    name: "crops"
    description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]"
    type_attr: "Tidx"
  }
  output_arg {
    name: "output"
    description: "4-D with shape `[batch, height, width, depth]`, where:\n\n      height = height_pad - crop_top - crop_bottom\n      width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1], [3]], [[5], [7]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "block_size"
    type: "int"
    has_minimum: true
    minimum: 2
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "BatchToSpace for 4-D tensors of type T."
  description: "This is a legacy version of the more general BatchToSpaceND.\n\nRearranges (permutes) data from batch into blocks of spatial data, followed by\ncropping. This is the reverse transformation of SpaceToBatch. More specifically,\nthis op outputs a copy of the input tensor where values from the `batch`\ndimension are moved in spatial blocks to the `height` and `width` dimensions,\nfollowed by cropping along the `height` and `width` dimensions."
}
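Example (1) above, reproduced through the TF 1.x wrapper `tf.batch_to_space` (a sketch):

```
import tensorflow as tf  # sketch, TF 1.x API

x = tf.constant([[[[1]]], [[[2]]], [[[3]]], [[[4]]]])  # shape [4, 1, 1, 1]
out = tf.batch_to_space(x, crops=[[0, 0], [0, 0]], block_size=2)
with tf.Session() as sess:
    print(sess.run(out))  # [[[[1] [2]] [[3] [4]]]], shape [1, 2, 2, 1]
```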
op {
  name: "BatchToSpaceND"
  input_arg {
    name: "input"
    description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions."
    type_attr: "T"
  }
  input_arg {
    name: "block_shape"
    description: "1-D with shape `[M]`, all values must be >= 1."
    type_attr: "Tblock_shape"
  }
  input_arg {
    name: "crops"
    description: "2-D with shape `[M, 2]`, all values must be >= 0.\n  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is\n  required that\n  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n     [block_shape[0], ..., block_shape[M-1],\n      batch / prod(block_shape),\n      input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1], block_shape[0],\n      ...,\n      input_shape[M], block_shape[M-1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0],\n      ...,\n      input_shape[M] * block_shape[M-1],\n\n      input_shape[M+1],\n      ...,\n      input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n   `reshaped_permuted` according to `crops` to produce the output of shape:\n     [batch / prod(block_shape),\n\n      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n      ...,\n      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n      input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n      [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n     [[[2], [4]], [[10], [12]]],\n     [[[5], [7]], [[13], [15]]],\n     [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[1],   [2],  [3],  [4]],\n     [[5],   [6],  [7],  [8]],\n     [[9],  [10], [11],  [12]],\n     [[13], [14], [15],  [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n    `crops = [[0, 0], [2, 0]]`:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n     [[[0], [2], [4]]], [[[0], [10], [12]]],\n     [[[0], [5], [7]]], [[[0], [13], [15]]],\n     [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1],   [2],  [3],  [4]],\n      [[5],   [6],  [7],  [8]]],\n     [[[9],  [10], [11],  [12]],\n      [[13], [14], [15],  [16]]]]\n```"
    type_attr: "Tcrops"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "Tblock_shape"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  attr {
    name: "Tcrops"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "BatchToSpace for N-D tensors of type T."
  description: "This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of shape\n`block_shape + [batch]`, interleaves these blocks back into the grid defined by\nthe spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as\nthe input.  The spatial dimensions of this intermediate result are then\noptionally cropped according to `crops` to produce the output.  This is the\nreverse of SpaceToBatch.  See below for a precise description."
}
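The four documented steps transcribe directly into NumPy for the common 4-D/NHWC case with M = 2 spatial dimensions (my sketch of the steps, not the kernel itself):

```
import numpy as np

def batch_to_space_nd(x, block_shape, crops):
    """NumPy transcription of the four steps above (4-D NHWC, M = 2)."""
    bh, bw = block_shape
    batch, h, w, c = x.shape
    n = batch // (bh * bw)
    x = x.reshape(bh, bw, n, h, w, c)        # step 1: peel block dims off batch
    x = x.transpose(2, 3, 0, 4, 1, 5)        # step 2: n, h, bh, w, bw, c
    x = x.reshape(n, h * bh, w * bw, c)      # step 3: interleave blocks into grid
    (top, bottom), (left, right) = crops     # step 4: crop spatial dims
    return x[:, top:h * bh - bottom, left:w * bw - right, :]

x = np.arange(1, 5).reshape(4, 1, 1, 1)      # example (1) input
print(batch_to_space_nd(x, (2, 2), [[0, 0], [0, 0]])[:, :, :, 0])  # [[[1 2] [3 4]]]
```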
op {
  name: "Betainc"
  input_arg {
    name: "a"
    type_attr: "T"
  }
  input_arg {
    name: "b"
    type_attr: "T"
  }
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Compute the regularized incomplete beta integral \\\\(I_x(a, b)\\\\)."
  description: "The regularized incomplete beta integral is defined as:\n\n\n\\\\(I_x(a, b) = \\frac{B(x; a, b)}{B(a, b)}\\\\)\n\nwhere\n\n\n\\\\(B(x; a, b) = \\int_0^x t^{a-1} (1 - t)^{b-1} dt\\\\)\n\n\nis the incomplete beta function and \\\\(B(a, b)\\\\) is the *complete*\nbeta function."
}
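SciPy's `scipy.special.betainc` uses the same regularized convention, which makes a quick numerical cross-check possible (the sample point is illustrative):

```
from scipy.special import betainc  # regularized incomplete beta, same convention

# I_x(a=2, b=3) at x = 0.5: B(0.5; 2, 3) / B(2, 3) = (11/192) / (1/12) = 0.6875
print(betainc(2.0, 3.0, 0.5))  # 0.6875
```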
op {
  name: "BiasAdd"
  input_arg {
    name: "value"
    description: "Any number of dimensions."
    type_attr: "T"
  }
  input_arg {
    name: "bias"
    description: "1-D with size the last dimension of `value`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Broadcasted sum of `value` and `bias`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Adds `bias` to `value`."
  description: "This is a special case of `tf.add` where `bias` is restricted to be 1-D.\nBroadcasting is supported, so `value` may have any number of dimensions."
}
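In NHWC the op is just a broadcast add over the last axis, as this NumPy analogue shows (values illustrative):

```
import numpy as np

value = np.zeros((2, 3, 4))        # any rank, channels last (NHWC-style)
bias = np.array([1., 2., 3., 4.])  # 1-D, sized to the last dimension
out = value + bias                 # broadcasts along the final axis
print(out.shape, out[0, 0])        # (2, 3, 4) [1. 2. 3. 4.]
```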
op {
  name: "BiasAddGrad"
  input_arg {
    name: "out_backprop"
    description: "Any number of dimensions."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "1-D with size the feature dimension of `out_backprop`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n    dimension."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "The backward operation for \"BiasAdd\" on the \"bias\" tensor."
  description: "It accumulates all the values from out_backprop into the feature dimension.\nFor NHWC data format, the feature dimension is the last. For NCHW data format,\nthe feature dimension is the third-to-last."
}
op {
  name: "BiasAddV1"
  input_arg {
    name: "value"
    description: "Any number of dimensions."
    type_attr: "T"
  }
  input_arg {
    name: "bias"
    description: "1-D with size the last dimension of `value`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Broadcasted sum of `value` and `bias`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Adds `bias` to `value`."
  description: "This is a deprecated version of BiasAdd and will be soon removed.\n\nThis is a special case of `tf.add` where `bias` is restricted to be 1-D.\nBroadcasting is supported, so `value` may have any number of dimensions."
}
op {
  name: "Bincount"
  input_arg {
    name: "arr"
    description: "int32 `Tensor`."
    type: DT_INT32
  }
  input_arg {
    name: "size"
    description: "non-negative int32 scalar `Tensor`."
    type: DT_INT32
  }
  input_arg {
    name: "weights"
    description: "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1."
    type_attr: "T"
  }
  output_arg {
    name: "bins"
    description: "1D `Tensor` with length equal to `size`. The counts or summed weights for\neach value in the range [0, size)."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Counts the number of occurrences of each value in an integer array."
  description: "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored."
}
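`np.bincount` mirrors both the counting and the weighted-sum behavior (a sketch; note that unlike this op, NumPy does not ignore values >= size, and `minlength` only extends the output):

```
import numpy as np

arr = np.array([1, 1, 2, 5])
print(np.bincount(arr, minlength=7))  # [0 2 1 0 0 1 0]: counts per value
print(np.bincount(arr, weights=[0.5, 0.5, 2.0, 1.0], minlength=7))
# [0. 1. 2. 0. 0. 1. 0.]: summed weights per value
```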
op {
  name: "Bitcast"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "type"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT8
        type: DT_INT16
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT16
        type: DT_QUINT16
        type: DT_QINT32
        type: DT_HALF
      }
    }
  }
  attr {
    name: "type"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT8
        type: DT_INT16
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT16
        type: DT_QUINT16
        type: DT_QINT32
        type: DT_HALF
      }
    }
  }
  summary: "Bitcasts a tensor from one type to another without copying data."
  description: "Given a tensor `input`, this operation returns a tensor that has the same buffer\ndata as `input` with datatype `type`.\n\nIf the input datatype `T` is larger than the output datatype `type` then the\nshape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].\n\nIf `T` is smaller than `type`, the operator requires that the rightmost\ndimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from\n[..., sizeof(`type`)/sizeof(`T`)] to [...].\n\n*NOTE*: Bitcast is implemented as a low-level cast, so machines with different\nendian orderings will give different results."
}
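NumPy's `.view()` performs the same buffer reinterpretation, including the documented shape change when casting to a narrower type (sketch):

```
import numpy as np

x = np.array([1.0, 2.0], dtype=np.float32)    # T = float32, 4 bytes
y = x.view(np.uint8).reshape(x.shape + (4,))  # type = uint8: [...] -> [..., 4]
print(y.shape)  # (2, 4); byte values depend on host endianness, as noted above
```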
op {
  name: "BitwiseAnd"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_UINT16
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Elementwise computes the bitwise AND of `x` and `y`."
  description: "The result will have those bits set, that are set in both `x` and `y`. The\ncomputation is performed on the underlying representations of `x` and `y`."
  is_commutative: true
}
op {
  name: "BitwiseOr"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_UINT16
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Elementwise computes the bitwise OR of `x` and `y`."
  description: "The result will have those bits set, that are set in `x`, `y` or both. The\ncomputation is performed on the underlying representations of `x` and `y`."
  is_commutative: true
}
op {
  name: "BitwiseXor"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_UINT16
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Elementwise computes the bitwise XOR of `x` and `y`."
  description: "The result will have those bits set, that are different in `x` and `y`. The\ncomputation is performed on the underlying representations of `x` and `y`."
  is_commutative: true
}
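The three bitwise ops side by side, via their NumPy analogues (values illustrative):

```
import numpy as np

x = np.array([0b1100], dtype=np.uint8)
y = np.array([0b1010], dtype=np.uint8)
print(np.bitwise_and(x, y))  # [ 8] = 0b1000: bits set in both
print(np.bitwise_or(x, y))   # [14] = 0b1110: bits set in either
print(np.bitwise_xor(x, y))  # [ 6] = 0b0110: bits that differ
```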
op {
  name: "BroadcastArgs"
  input_arg {
    name: "s0"
    type_attr: "T"
  }
  input_arg {
    name: "s1"
    type_attr: "T"
  }
  output_arg {
    name: "r0"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Return the shape of s0 op s1 with broadcast."
  description: "Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the\nbroadcasted shape. `s0`, `s1` and `r0` are all integer vectors."
}
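The broadcasted shape that BroadcastArgs computes is what NumPy exposes as `np.broadcast_shapes` (available in NumPy >= 1.20; a sketch):

```
import numpy as np

s0, s1 = (2, 1, 5), (3, 1)
print(np.broadcast_shapes(s0, s1))  # (2, 3, 5)
```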
op {
  name: "BroadcastGradientArgs"
  input_arg {
    name: "s0"
    type_attr: "T"
  }
  input_arg {
    name: "s1"
    type_attr: "T"
  }
  output_arg {
    name: "r0"
    type_attr: "T"
  }
  output_arg {
    name: "r1"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Return the reduction indices for computing gradients of s0 op s1 with broadcast."
  description: "This is typically used by gradient computations for a broadcasting operation."
}
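A minimal reimplementation of the reduction-index logic for broadcast-compatible shapes (my sketch of the idea, not the kernel):

```
def broadcast_gradient_args(s0, s1):
    """Axes over which each input's gradient must be summed (sketch)."""
    n = max(len(s0), len(s1))
    s0 = (1,) * (n - len(s0)) + tuple(s0)  # left-pad with 1s, as broadcasting does
    s1 = (1,) * (n - len(s1)) + tuple(s1)
    r0 = [i for i in range(n) if s0[i] == 1 and s1[i] > 1]
    r1 = [i for i in range(n) if s1[i] == 1 and s0[i] > 1]
    return r0, r1

print(broadcast_gradient_args((2, 1, 5), (3, 5)))  # ([1], [0])
```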
op {
  name: "Bucketize"
  input_arg {
    name: "input"
    description: "Any shape of Tensor contains with int or float type."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Same shape with \'input\', each value of input replaced with bucket index.\n\n@compatibility(numpy)\nEquivalent to np.digitize.\n@end_compatibility"
    type: DT_INT32
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "boundaries"
    type: "list(float)"
    description: "A sorted list of floats gives the boundary of the buckets."
  }
  summary: "Bucketizes \'input\' based on \'boundaries\'."
  description: "For example, if the inputs are\n    boundaries = [0, 10, 100]\n    input = [[-5, 10000]\n             [150,   10]\n             [5,    100]]\n\nthen the output will be\n    output = [[0, 3]\n              [3, 2]\n              [1, 3]]"
}
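# A sketch reproducing the documented example with the standard library;
# `bisect_right` counts the boundaries that are <= each value, matching the
# np.digitize equivalence noted above. Illustrative only.
#
#     import bisect
#
#     def bucketize(values, boundaries):
#         return [bisect.bisect_right(boundaries, v) for v in values]
#
#     bucketize([-5, 10000, 150, 10, 5, 100], [0., 10., 100.])
#     # -> [0, 3, 3, 2, 1, 3], the flattened output from the example above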
op {
  name: "CTCBeamSearchDecoder"
  input_arg {
    name: "inputs"
    description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
    type: DT_FLOAT
  }
  input_arg {
    name: "sequence_length"
    description: "A vector containing sequence lengths, size `(batch)`."
    type: DT_INT32
  }
  output_arg {
    name: "decoded_indices"
    description: "A list (length: top_paths) of indices matrices.  Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor`.  The rows store: [batch, time]."
    type: DT_INT64
    number_attr: "top_paths"
  }
  output_arg {
    name: "decoded_values"
    description: "A list (length: top_paths) of values vectors.  Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor`.  The vector stores the decoded classes for beam j."
    type: DT_INT64
    number_attr: "top_paths"
  }
  output_arg {
    name: "decoded_shape"
    description: "A list (length: top_paths) of shape vector.  Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`."
    type: DT_INT64
    number_attr: "top_paths"
  }
  output_arg {
    name: "log_probability"
    description: "A matrix, shaped: `(batch_size x top_paths)`.  The\nsequence log-probabilities."
    type: DT_FLOAT
  }
  attr {
    name: "beam_width"
    type: "int"
    description: "A scalar >= 0 (beam search beam width)."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "top_paths"
    type: "int"
    description: "A scalar >= 0, <= beam_width (controls output size)."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "merge_repeated"
    type: "bool"
    default_value {
      b: true
    }
    description: "If true, merge repeated classes in output."
  }
  summary: "Performs beam search decoding on the logits given in input."
  description: "A note about the attribute merge_repeated: For the beam search decoder,\nthis means that if consecutive entries in a beam are the same, only\nthe first of these is emitted.  That is, when the top path is \"A B B B B\",\n\"A B\" is returned if merge_repeated = True but \"A B B B B\" is\nreturned if merge_repeated = False."
}
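# A usage sketch via the TF 1.x Python wrapper `tf.nn.ctc_beam_search_decoder`;
# shapes follow the op signature above, and `num_classes` is illustrative.
#
#     import tensorflow as tf
#
#     num_classes = 28
#     logits = tf.placeholder(tf.float32, [None, None, num_classes])  # time-major
#     seq_len = tf.placeholder(tf.int32, [None])
#     decoded, log_prob = tf.nn.ctc_beam_search_decoder(
#         logits, seq_len, beam_width=100, top_paths=1, merge_repeated=True)
#     # decoded is a list (length top_paths) of SparseTensors;
#     # log_prob has shape [batch_size, top_paths].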
op {
  name: "CTCGreedyDecoder"
  input_arg {
    name: "inputs"
    description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
    type: DT_FLOAT
  }
  input_arg {
    name: "sequence_length"
    description: "A vector containing sequence lengths, size `(batch_size)`."
    type: DT_INT32
  }
  output_arg {
    name: "decoded_indices"
    description: "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor`.  The rows store: [batch, time]."
    type: DT_INT64
  }
  output_arg {
    name: "decoded_values"
    description: "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor`.  The vector stores the decoded classes."
    type: DT_INT64
  }
  output_arg {
    name: "decoded_shape"
    description: "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`."
    type: DT_INT64
  }
  output_arg {
    name: "log_probability"
    description: "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities."
    type: DT_FLOAT
  }
  attr {
    name: "merge_repeated"
    type: "bool"
    default_value {
      b: false
    }
    description: "If True, merge repeated classes in output."
  }
  summary: "Performs greedy decoding on the logits given in inputs."
  description: "A note about the attribute merge_repeated: if enabled, when\nconsecutive logits\' maximum indices are the same, only the first of\nthese is emitted.  Labeling the blank \'*\', the sequence \"A B B * B B\"\nbecomes \"A B B\" if merge_repeated = True and \"A B B B B\" if\nmerge_repeated = False.\n\nRegardless of the value of merge_repeated, if the maximum index of a given\ntime and batch corresponds to the blank, index `(num_classes - 1)`, no new\nelement is emitted."
}
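# A pure-Python sketch of the merge_repeated behavior described above,
# applied to a best-path index sequence (helper name is illustrative):
#
#     def ctc_greedy_collapse(path, blank, merge_repeated=True):
#         out, prev = [], None
#         for idx in path:
#             if not (merge_repeated and idx == prev) and idx != blank:
#                 out.append(idx)
#             prev = idx
#         return out
#
#     ctc_greedy_collapse(list("ABB*BB"), blank="*")
#     # -> ['A', 'B', 'B']
#     ctc_greedy_collapse(list("ABB*BB"), blank="*", merge_repeated=False)
#     # -> ['A', 'B', 'B', 'B', 'B']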
op {
  name: "CTCLoss"
  input_arg {
    name: "inputs"
    description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits."
    type: DT_FLOAT
  }
  input_arg {
    name: "labels_indices"
    description: "The indices of a `SparseTensor`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`."
    type: DT_INT64
  }
  input_arg {
    name: "labels_values"
    description: "The values (labels) associated with the given batch and time."
    type: DT_INT32
  }
  input_arg {
    name: "sequence_length"
    description: "A vector containing sequence lengths (batch)."
    type: DT_INT32
  }
  output_arg {
    name: "loss"
    description: "A vector (batch) containing log-probabilities."
    type: DT_FLOAT
  }
  output_arg {
    name: "gradient"
    description: "The gradient of `loss`.  3-D, shape:\n`(max_time x batch_size x num_classes)`."
    type: DT_FLOAT
  }
  attr {
    name: "preprocess_collapse_repeated"
    type: "bool"
    default_value {
      b: false
    }
    description: "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation."
  }
  attr {
    name: "ctc_merge_repeated"
    type: "bool"
    default_value {
      b: true
    }
    description: "Scalar.  If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels.  This is a simplified version of CTC."
  }
  attr {
    name: "ignore_longer_outputs_than_inputs"
    type: "bool"
    default_value {
      b: false
    }
    description: "Scalar. If set to true, during CTC\ncalculation, items that have longer output sequences than input sequences\nare skipped: they don\'t contribute to the loss term and have zero-gradient."
  }
  summary: "Calculates the CTC Loss (log probability) for each batch entry.  Also calculates"
  description: "the gradient.  This class performs the softmax operation for you, so inputs\nshould be e.g. linear projections of outputs by an LSTM."
}
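# A TF 1.x usage sketch via `tf.nn.ctc_loss`; labels are fed as a
# SparseTensor, and `num_classes` is illustrative. Inputs are raw logits,
# since the op applies the softmax itself.
#
#     import tensorflow as tf
#
#     num_classes = 28
#     logits = tf.placeholder(tf.float32, [None, None, num_classes])  # time-major
#     labels = tf.sparse_placeholder(tf.int32)
#     seq_len = tf.placeholder(tf.int32, [None])
#     loss = tf.nn.ctc_loss(labels, logits, seq_len,
#                           preprocess_collapse_repeated=False,
#                           ctc_merge_repeated=True)  # shape [batch_size]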
op {
  name: "CacheDataset"
  input_arg {
    name: "input_dataset"
    type: DT_VARIANT
  }
  input_arg {
    name: "filename"
    description: "A path on the filesystem where we should cache the dataset. Note: this\nwill be a directory."
    type: DT_STRING
  }
  output_arg {
    name: "handle"
    type: DT_VARIANT
  }
  attr {
    name: "output_types"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "output_shapes"
    type: "list(shape)"
    has_minimum: true
    minimum: 1
  }
  summary: "Creates a dataset that caches elements from `input_dataset`."
  description: "A CacheDataset will iterate over the input_dataset, and store tensors. If the\ncache already exists, the cache will be used. If the cache is inappropriate\n(e.g. cannot be opened, contains tensors of the wrong shape / size), an error\nwill the returned when used."
}
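# A usage sketch via the tf.data wrapper (TF 1.4+); the cache path is
# illustrative. The first pass over the dataset writes the cache files,
# and later passes read them back.
#
#     import tensorflow as tf
#
#     dataset = tf.data.Dataset.range(100)
#     dataset = dataset.cache("/tmp/example_cache")  # "" caches in memory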
op {
  name: "Cast"
  input_arg {
    name: "x"
    type_attr: "SrcT"
  }
  output_arg {
    name: "y"
    type_attr: "DstT"
  }
  attr {
    name: "SrcT"
    type: "type"
  }
  attr {
    name: "DstT"
    type: "type"
  }
  summary: "Cast x of type SrcT to y of DstT."
}
op {
  name: "Ceil"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Returns element-wise smallest integer in not less than x."
}
op {
  name: "CheckNumerics"
  input_arg {
    name: "tensor"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "message"
    type: "string"
    description: "Prefix of the error message."
  }
  summary: "Checks a tensor for NaN and Inf values."
  description: "When run, reports an `InvalidArgument` error if `tensor` has any values\nthat are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is."
}
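# A minimal usage sketch: `tf.check_numerics` wraps this op in TF 1.x.
#
#     import tensorflow as tf
#
#     x = tf.constant([1.0, float("nan")])
#     checked = tf.check_numerics(x, message="x contains bad values:")
#     # Evaluating `checked` raises InvalidArgumentError because of the NaN;
#     # with finite inputs it passes the tensor through unchanged.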
op {
  name: "Cholesky"
  input_arg {
    name: "input"
    description: "Shape is `[..., M, M]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Shape is `[..., M, M]`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_DOUBLE
        type: DT_FLOAT
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes the Cholesky decomposition of one or more square matrices."
  description: "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices.\n\nThe input has to be symmetric and positive definite. Only the lower-triangular\npart of the input will be used for this operation. The upper-triangular part\nwill not be read.\n\nThe output is a tensor of the same shape as the input\ncontaining the Cholesky decompositions for all input submatrices `[..., :, :]`.\n\n**Note**: The gradient computation on GPU is faster for large matrices but\nnot for large batch dimensions when the submatrices are small. In this\ncase it might be faster to use the CPU."
}
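# A worked example of the decomposition itself, sketched with NumPy for
# concreteness (the TF op batches this over the leading dimensions):
#
#     import numpy as np
#
#     a = np.array([[4., 2.],
#                   [2., 3.]])          # symmetric positive definite
#     l = np.linalg.cholesky(a)         # lower-triangular factor
#     np.allclose(l @ l.T, a)           # True: a == l @ l^T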
op {
  name: "CholeskyGrad"
  input_arg {
    name: "l"
    description: "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
    type_attr: "T"
  }
  input_arg {
    name: "grad"
    description: "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Symmetrized version of df/dA . Shape is `[..., M, M]`"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Computes the reverse mode backpropagated gradient of the Cholesky algorithm."
  description: "For an explanation see \"Differentiation of the Cholesky algorithm\" by\nIain Murray http://arxiv.org/abs/1602.07527."
}
op {
  name: "CompareAndBitpack"
  input_arg {
    name: "input"
    description: "Values to compare against `threshold` and bitpack."
    type_attr: "T"
  }
  input_arg {
    name: "threshold"
    description: "Threshold to compare against."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "The bitpacked comparisons."
    type: DT_UINT8
  }
  attr {
    name: "T"
    type: "type"
    description: "The type of the input and threshold."
    allowed_values {
      list {
        type: DT_BOOL
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Compare values of `input` to `threshold` and pack resulting bits into a `uint8`."
  description: "Each comparison returns a boolean `true` (if `input_value > threshold`)\nor and `false` otherwise.\n\nThis operation is useful for Locality-Sensitive-Hashing (LSH) and other\nalgorithms that use hashing approximations of cosine and `L2` distances;\ncodes can be generated from an input via:\n\n```python\ncodebook_size = 50\ncodebook_bits = codebook_size * 32\ncodebook = tf.get_variable(\'codebook\', [x.shape[-1].value, codebook_bits],\n                           dtype=x.dtype,\n                           initializer=tf.orthogonal_initializer())\ncodes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)\ncodes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32\n# now codes has shape x.shape[:-1] + [codebook_size]\n```\n\n**NOTE**: Currently, the innermost dimension of the tensor must be divisible\nby 8.\n\nGiven an `input` shaped `[s0, s1, ..., s_n]`, the output is\na `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`."
}
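# A NumPy sketch of the compare-and-pack step (the bit order within each
# byte may differ from the TF kernel; this is illustrative only):
#
#     import numpy as np
#
#     x = np.arange(16, dtype=np.float32)
#     packed = np.packbits(x > 7.5)     # 16 bools -> 2 uint8 values
#     # packed == array([0, 255], dtype=uint8)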
op {
  name: "Complex"
  input_arg {
    name: "real"
    type_attr: "T"
  }
  input_arg {
    name: "imag"
    type_attr: "T"
  }
  output_arg {
    name: "out"
    type_attr: "Tout"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_FLOAT
    }
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "Tout"
    type: "type"
    default_value {
      type: DT_COMPLEX64
    }
    allowed_values {
      list {
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Converts two real numbers to a complex number."
  description: "Given a tensor `real` representing the real part of a complex number, and a\ntensor `imag` representing the imaginary part of a complex number, this\noperation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n*a* represents the `real` part and *b* represents the `imag` part.\n\nThe input tensors `real` and `imag` must have the same shape.\n\nFor example:\n\n```\n# tensor \'real\' is [2.25, 3.25]\n# tensor `imag` is [4.75, 5.75]\ntf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]\n```"
}
op {
  name: "ComplexAbs"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "Tout"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_COMPLEX64
    }
    allowed_values {
      list {
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  attr {
    name: "Tout"
    type: "type"
    default_value {
      type: DT_FLOAT
    }
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Computes the complex absolute value of a tensor."
  description: "Given a tensor `x` of complex numbers, this operation returns a tensor of type\n`float` or `double` that is the absolute value of each element in `x`. All\nelements in `x` must be complex numbers of the form \\\\(a + bj\\\\). The absolute\nvalue is computed as \\\\( \\sqrt{a^2 + b^2}\\\\)."
}
op {
  name: "ComputeAccidentalHits"
  input_arg {
    name: "true_classes"
    description: "The true_classes output of UnpackSparseLabels."
    type: DT_INT64
  }
  input_arg {
    name: "sampled_candidates"
    description: "The sampled_candidates output of CandidateSampler."
    type: DT_INT64
  }
  output_arg {
    name: "indices"
    description: "A vector of indices corresponding to rows of true_candidates."
    type: DT_INT32
  }
  output_arg {
    name: "ids"
    description: "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices."
    type: DT_INT64
  }
  output_arg {
    name: "weights"
    description: "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX."
    type: DT_FLOAT
  }
  attr {
    name: "num_true"
    type: "int"
    description: "Number of true labels per context."
  }
  attr {
    name: "seed"
    type: "int"
    default_value {
      i: 0
    }
    description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed.  Otherwise, it is seeded by a\nrandom seed."
  }
  attr {
    name: "seed2"
    type: "int"
    default_value {
      i: 0
    }
    description: "An second seed to avoid seed collision."
  }
  summary: "Computes the ids of the positions in sampled_candidates that match true_labels."
  description: "When doing log-odds NCE, the result of this op should be passed through a\nSparseToDense op, then added to the logits of the sampled candidates. This has\nthe effect of \'removing\' the sampled labels that match the true labels by\nmaking the classifier sure that they are sampled labels."
}
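# A hedged TF 1.x sketch of the pattern described above: scatter the
# -FLOAT_MAX weights into the sampled logits so accidental hits are
# effectively removed. `sampled_logits` ([batch, num_sampled]),
# `true_classes`, and `sampled_candidates` are assumed to already exist.
#
#     indices, ids, weights = tf.nn.compute_accidental_hits(
#         true_classes, sampled_candidates, num_true=1)
#     sparse_indices = tf.stack([indices, tf.cast(ids, tf.int32)], axis=1)
#     sampled_logits += tf.sparse_to_dense(
#         sparse_indices, tf.shape(sampled_logits), weights,
#         default_value=0.0, validate_indices=False)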
op {
  name: "Concat"
  input_arg {
    name: "concat_dim"
    description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [0, rank(values))."
    type: DT_INT32
  }
  input_arg {
    name: "values"
    description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
    type_attr: "T"
    number_attr: "N"
  }
  output_arg {
    name: "output"
    description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
    type_attr: "T"
  }
  attr {
    name: "N"
    type: "int"
    has_minimum: true
    minimum: 2
  }
  attr {
    name: "T"
    type: "type"
  }
  summary: "Concatenates tensors along one dimension."
}
op {
  name: "ConcatOffset"
  input_arg {
    name: "concat_dim"
    description: "The dimension along which to concatenate."
    type: DT_INT32
  }
  input_arg {
    name: "shape"
    description: "The `N` int32 vectors representing shape of tensors being concatenated."
    type: DT_INT32
    number_attr: "N"
  }
  output_arg {
    name: "offset"
    description: "The `N` int32 vectors representing the starting offset\nof input tensors within the concatenated output."
    type: DT_INT32
    number_attr: "N"
  }
  attr {
    name: "N"
    type: "int"
    has_minimum: true
    minimum: 2
  }
  summary: "Computes offsets of concat inputs within its output."
  description: "For example:\n\n```\n# \'x\' is [2, 2, 7]\n# \'y\' is [2, 3, 7]\n# \'z\' is [2, 5, 7]\nconcat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]\n```\n\nThis is typically used by gradient computations for a concat operation."
}
op {
  name: "ConcatV2"
  input_arg {
    name: "values"
    description: "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`."
    type_attr: "T"
    number_attr: "N"
  }
  input_arg {
    name: "axis"
    description: "0-D.  The dimension along which to concatenate.  Must be in the\nrange [-rank(values), rank(values))."
    type_attr: "Tidx"
  }
  output_arg {
    name: "output"
    description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension.  This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes."
    type_attr: "T"
  }
  attr {
    name: "N"
    type: "int"
    has_minimum: true
    minimum: 2
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Concatenates tensors along one dimension."
}
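# A minimal usage example via `tf.concat`, which maps to ConcatV2:
#
#     import tensorflow as tf
#
#     t1 = tf.constant([[1, 2], [3, 4]])
#     t2 = tf.constant([[5, 6], [7, 8]])
#     tf.concat([t1, t2], axis=0)  # shape [4, 2]
#     tf.concat([t1, t2], axis=1)  # shape [2, 4]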
op {
  name: "ConcatenateDataset"
  input_arg {
    name: "input_dataset"
    type: DT_VARIANT
  }
  input_arg {
    name: "another_dataset"
    type: DT_VARIANT
  }
  output_arg {
    name: "handle"
    type: DT_VARIANT
  }
  attr {
    name: "output_types"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "output_shapes"
    type: "list(shape)"
    has_minimum: true
    minimum: 1
  }
  summary: "Creates a dataset that concatenates `input_dataset` with `another_dataset`."
}
op {
  name: "ConditionalAccumulator"
  output_arg {
    name: "handle"
    description: "The handle to the accumulator."
    type: DT_STRING
    is_ref: true
  }
  attr {
    name: "dtype"
    type: "type"
    description: "The type of the value being accumulated."
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "shape"
    type: "shape"
    description: "The shape of the values, can be [], in which case shape is unknown."
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this accumulator will be shared under the\ngiven name across multiple sessions."
  }
  summary: "A conditional accumulator for aggregating gradients."
  description: "The accumulator accepts gradients marked with local_step greater or\nequal to the most recent global_step known to the accumulator. The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator."
  is_stateful: true
}
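# A TF 1.x workflow sketch with the `tf.ConditionalAccumulator` wrapper:
# apply gradients, then extract the average once enough have accumulated
# (which also resets the aggregate and bumps the recorded global step).
#
#     import tensorflow as tf
#
#     acc = tf.ConditionalAccumulator(tf.float32, shape=[])
#     apply_ops = [acc.apply_grad(g, local_step=0) for g in (1.0, 2.0, 3.0)]
#     avg = acc.take_grad(num_required=3)
#     with tf.Session() as sess:
#         sess.run(apply_ops)
#         print(sess.run(avg))  # 2.0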
op {
  name: "Conj"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_COMPLEX64
    }
    allowed_values {
      list {
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_VARIANT
      }
    }
  }
  summary: "Returns the complex conjugate of a complex number."
  description: "Given a tensor `input` of complex numbers, this operation returns a tensor of\ncomplex numbers that are the complex conjugate of each element in `input`. The\ncomplex numbers in `input` must be of the form \\\\(a + bj\\\\), where *a* is the\nreal part and *b* is the imaginary part.\n\nThe complex conjugate returned by this operation is of the form \\\\(a - bj\\\\).\n\nFor example:\n\n```\n# tensor \'input\' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]\n```"
}
op {
  name: "ConjugateTranspose"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "perm"
    type_attr: "Tperm"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "Tperm"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Shuffle dimensions of x according to a permutation and conjugate the result."
  description: "The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\n  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`\n  `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`"
}
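# The identity above, sketched in NumPy for a rank-2 tensor:
#
#     import numpy as np
#
#     x = np.array([[1 + 1j, 2 - 2j],
#                   [3 + 3j, 4 - 4j]])
#     y = np.conj(np.transpose(x, (1, 0)))  # y[i, j] == conj(x[j, i])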
op {
  name: "Const"
  output_arg {
    name: "output"
    type_attr: "dtype"
  }
  attr {
    name: "value"
    type: "tensor"
    description: "Attr `value` is the tensor to return."
  }
  attr {
    name: "dtype"
    type: "type"
  }
  summary: "Returns a constant tensor."
}
op {
  name: "ControlTrigger"
  summary: "Does nothing. Serves as a control trigger for scheduling."
  description: "Only useful as a placeholder for control edges."
}
op {
  name: "Conv2D"
  input_arg {
    name: "input"
    description: "A 4-D tensor. The dimension order is interpreted according to the value\nof `data_format`, see below for details."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "A 4-D tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "A 4-D tensor. The dimension order is determined by the value of\n`data_format`, see below for details."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 4.  The stride of the sliding window for each\ndimension of `input`. The dimension order is determined by the value of\n  `data_format`, see below for details."
  }
  attr {
    name: "use_cudnn_on_gpu"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Computes a 2-D convolution given 4-D `input` and `filter` tensors."
  description: "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`, this op\nperforms the following:\n\n1. Flattens the filter to a 2-D matrix with shape\n   `[filter_height * filter_width * in_channels, output_channels]`.\n2. Extracts image patches from the input tensor to form a *virtual*\n   tensor of shape `[batch, out_height, out_width,\n   filter_height * filter_width * in_channels]`.\n3. For each patch, right-multiplies the filter matrix and the image patch\n   vector.\n\nIn detail, with the default NHWC format,\n\n    output[b, i, j, k] =\n        sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n                        filter[di, dj, q, k]\n\nMust have `strides[0] = strides[3] = 1`.  For the most common case of the same\nhorizontal and vertices strides, `strides = [1, stride, stride, 1]`."
}
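# A NumPy sketch of the three steps described above (flatten the filter,
# extract patches, right-multiply), restricted to stride 1 and VALID
# padding for brevity; not the actual TF kernel.
#
#     import numpy as np
#
#     def conv2d_valid(x, w):
#         # x: [batch, in_h, in_w, in_c], w: [f_h, f_w, in_c, out_c]
#         batch, in_h, in_w, in_c = x.shape
#         f_h, f_w, _, out_c = w.shape
#         out_h, out_w = in_h - f_h + 1, in_w - f_w + 1
#         w_mat = w.reshape(f_h * f_w * in_c, out_c)      # step 1
#         patches = np.empty((batch, out_h, out_w, f_h * f_w * in_c))
#         for i in range(out_h):                          # step 2
#             for j in range(out_w):
#                 patches[:, i, j, :] = \
#                     x[:, i:i + f_h, j:j + f_w, :].reshape(batch, -1)
#         return patches @ w_mat   # step 3: [batch, out_h, out_w, out_c]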
op {
  name: "Conv2DBackpropFilter"
  input_arg {
    name: "input"
    description: "4-D with shape `[batch, in_height, in_width, in_channels]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter_sizes"
    description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor."
    type: DT_INT32
  }
  input_arg {
    name: "out_backprop"
    description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
  }
  attr {
    name: "use_cudnn_on_gpu"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Computes the gradients of convolution with respect to the filter."
}
op {
  name: "Conv2DBackpropInput"
  input_arg {
    name: "input_sizes"
    description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor."
    type: DT_INT32
  }
  input_arg {
    name: "filter"
    description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient\nw.r.t. the input of the convolution."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat."
  }
  attr {
    name: "use_cudnn_on_gpu"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, in_channels, in_height, in_width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Computes the gradients of convolution with respect to the input."
}
op {
  name: "Conv3D"
  input_arg {
    name: "input"
    description: "Shape `[batch, in_depth, in_height, in_width, in_channels]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NDHWC"
    }
    description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
    allowed_values {
      list {
        s: "NDHWC"
        s: "NCDHW"
      }
    }
  }
  summary: "Computes a 3-D convolution given 5-D `input` and `filter` tensors."
  description: "In signal processing, cross-correlation is a measure of similarity of\ntwo waveforms as a function of a time-lag applied to one of them. This\nis also known as a sliding dot product or sliding inner-product.\n\nOur Conv3D implements a form of cross-correlation."
}
op {
  name: "Conv3DBackpropFilter"
  input_arg {
    name: "input"
    description: "Shape `[batch, depth, rows, cols, in_channels]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  summary: "Computes the gradients of 3-D convolution with respect to the filter."
  deprecation {
    version: 10
    explanation: "Use Conv3DBackpropFilterV2"
  }
}
op {
  name: "Conv3DBackpropFilterV2"
  input_arg {
    name: "input"
    description: "Shape `[batch, depth, rows, cols, in_channels]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter_sizes"
    description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor."
    type: DT_INT32
  }
  input_arg {
    name: "out_backprop"
    description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NDHWC"
    }
    description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
    allowed_values {
      list {
        s: "NDHWC"
        s: "NCDHW"
      }
    }
  }
  summary: "Computes the gradients of 3-D convolution with respect to the filter."
}
op {
  name: "Conv3DBackpropInput"
  input_arg {
    name: "input"
    description: "Shape `[batch, depth, rows, cols, in_channels]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  summary: "Computes the gradients of 3-D convolution with respect to the input."
  deprecation {
    version: 10
    explanation: "Use Conv3DBackpropInputV2"
  }
}
op {
  name: "Conv3DBackpropInputV2"
  input_arg {
    name: "input_sizes"
    description: "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor."
    type: DT_INT32
  }
  input_arg {
    name: "filter"
    description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`."
    has_minimum: true
    minimum: 5
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NDHWC"
    }
    description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n    [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n    [batch, in_channels, in_depth, in_height, in_width]."
    allowed_values {
      list {
        s: "NDHWC"
        s: "NCDHW"
      }
    }
  }
  summary: "Computes the gradients of 3-D convolution with respect to the input."
}
op {
  name: "Copy"
  input_arg {
    name: "input"
    description: "Input tensor."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Output tensor, deep-copied from input."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "tensor_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "The name of the input tensor."
  }
  attr {
    name: "debug_ops_spec"
    type: "list(string)"
    default_value {
      list {
      }
    }
    description: "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\"."
  }
  summary: "Copy Op."
  description: "Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the\ndevice on which the tensor is allocated.\nN.B.: If the all downstream attached debug ops are disabled given the current\ngRPC gating status, the output will simply forward the input tensor without\ndeep-copying. See the documentation of Debug* ops for more details.\n\nUnlike the CopyHost Op, this op does not have HostMemory constraint on its\ninput or output."
  allows_uninitialized_input: true
}
op {
  name: "CopyHost"
  input_arg {
    name: "input"
    description: "Input tensor."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Output tensor, deep-copied from input."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "tensor_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "The name of the input tensor."
  }
  attr {
    name: "debug_ops_spec"
    type: "list(string)"
    default_value {
      list {
      }
    }
    description: "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\"."
  }
  summary: "Copy Host Op."
  description: "Performs CPU-to-CPU deep-copying of tensor.\nN.B.: If the all downstream attached debug ops are disabled given the current\ngRPC gating status, the output will simply forward the input tensor without\ndeep-copying. See the documentation of Debug* ops for more details.\n\nUnlike the Copy Op, this op has HostMemory constraint on its input or output."
  allows_uninitialized_input: true
}
op {
  name: "Cos"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes cos of x element-wise."
}
op {
  name: "Cosh"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Computes hyperbolic cosine of x element-wise."
}
op {
  name: "CountUpTo"
  input_arg {
    name: "ref"
    description: "Should be from a scalar `Variable` node."
    type_attr: "T"
    is_ref: true
  }
  output_arg {
    name: "output"
    description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct."
    type_attr: "T"
  }
  attr {
    name: "limit"
    type: "int"
    description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error."
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Increments \'ref\' until it reaches \'limit\'."
}
op {
  name: "CropAndResize"
  input_arg {
    name: "image"
    description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
    type_attr: "T"
  }
  input_arg {
    name: "boxes"
    description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
    type: DT_FLOAT
  }
  input_arg {
    name: "box_ind"
    description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
    type: DT_INT32
  }
  input_arg {
    name: "crop_size"
    description: "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive."
    type: DT_INT32
  }
  output_arg {
    name: "crops"
    description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
    type: DT_FLOAT
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "method"
    type: "string"
    default_value {
      s: "bilinear"
    }
    description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
    allowed_values {
      list {
        s: "bilinear"
      }
    }
  }
  attr {
    name: "extrapolation_value"
    type: "float"
    default_value {
      f: 0
    }
    description: "Value used for extrapolation, when applicable."
  }
  summary: "Extracts crops from the input image tensor and bilinearly resizes them (possibly"
  description: "with aspect ratio change) to a common output size specified by `crop_size`. This\nis more general than the `crop_to_bounding_box` op which extracts a fixed size\nslice from the input image and does not allow resizing or aspect ratio change.\n\nReturns a tensor with `crops` from the input `image` at positions defined at the\nbounding box locations in `boxes`. The cropped boxes are all resized (with\nbilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The\nresult is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The\nresizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the\nmethod will give identical results to using `tf.image.resize_bilinear()`\nwith `align_corners=True`."
}
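# A usage sketch via `tf.image.crop_and_resize` (TF 1.x); the single box
# below covers the whole first image, so the crop is just a 64x64 resize.
#
#     import tensorflow as tf
#
#     image = tf.placeholder(tf.float32, [None, None, None, 3])
#     boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]])   # normalized [y1, x1, y2, x2]
#     box_ind = tf.constant([0])
#     crops = tf.image.crop_and_resize(image, boxes, box_ind, crop_size=[64, 64])
#     # crops: [1, 64, 64, 3]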
op {
  name: "CropAndResizeGradBoxes"
  input_arg {
    name: "grads"
    description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
    type: DT_FLOAT
  }
  input_arg {
    name: "image"
    description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive."
    type_attr: "T"
  }
  input_arg {
    name: "boxes"
    description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
    type: DT_FLOAT
  }
  input_arg {
    name: "box_ind"
    description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
    type: DT_INT32
  }
  output_arg {
    name: "output"
    description: "A 2-D tensor of shape `[num_boxes, 4]`."
    type: DT_FLOAT
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "method"
    type: "string"
    default_value {
      s: "bilinear"
    }
    description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
    allowed_values {
      list {
        s: "bilinear"
      }
    }
  }
  summary: "Computes the gradient of the crop_and_resize op wrt the input boxes tensor."
}
op {
  name: "CropAndResizeGradImage"
  input_arg {
    name: "grads"
    description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
    type: DT_FLOAT
  }
  input_arg {
    name: "boxes"
    description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values."
    type: DT_FLOAT
  }
  input_arg {
    name: "box_ind"
    description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to."
    type: DT_INT32
  }
  input_arg {
    name: "image_size"
    description: "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive."
    type: DT_INT32
  }
  output_arg {
    name: "output"
    description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_HALF
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "method"
    type: "string"
    default_value {
      s: "bilinear"
    }
    description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now."
    allowed_values {
      list {
        s: "bilinear"
      }
    }
  }
  summary: "Computes the gradient of the crop_and_resize op wrt the input image tensor."
}
op {
  name: "Cross"
  input_arg {
    name: "a"
    description: "A tensor containing 3-element vectors."
    type_attr: "T"
  }
  input_arg {
    name: "b"
    description: "Another tensor, of same type and shape as `a`."
    type_attr: "T"
  }
  output_arg {
    name: "product"
    description: "Pairwise cross product of the vectors in `a` and `b`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_INT16
        type: DT_INT8
        type: DT_UINT16
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  summary: "Compute the pairwise cross product."
  description: "`a` and `b` must be the same shape; they can either be simple 3-element vectors,\nor any shape where the innermost dimension is 3. In the latter case, each pair\nof corresponding 3-element vectors is cross-multiplied independently."
}
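# A quick example with NumPy's equivalent, applied pairwise over the
# innermost dimension of size 3:
#
#     import numpy as np
#
#     a = np.array([[1., 0., 0.], [0., 1., 0.]])
#     b = np.array([[0., 1., 0.], [0., 0., 1.]])
#     np.cross(a, b)  # [[0., 0., 1.], [1., 0., 0.]]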
op {
  name: "Cumprod"
  input_arg {
    name: "x"
    description: "A `Tensor`. Must be one of the following types: `float32`, `float64`,\n`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n`complex128`, `qint8`, `quint8`, `qint32`, `half`."
    type_attr: "T"
  }
  input_arg {
    name: "axis"
    description: "A `Tensor` of type `int32` (default: 0). Must be in the range\n`[-rank(x), rank(x))`."
    type_attr: "Tidx"
  }
  output_arg {
    name: "out"
    type_attr: "T"
  }
  attr {
    name: "exclusive"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, perform exclusive cumprod."
  }
  attr {
    name: "reverse"
    type: "bool"
    default_value {
      b: false
    }
    description: "A `bool` (default: False)."
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Compute the cumulative product of the tensor `x` along `axis`."
  description: "By default, this op performs an inclusive cumprod, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumprod([a, b, c])  # => [a, a * b, a * b * c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumprod is\nperformed instead:\n\n```python\ntf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumprod is performed in the\nopposite direction:\n\n```python\ntf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]\n```"
}
op {
  name: "Cumsum"
  input_arg {
    name: "x"
    description: "A `Tensor`. Must be one of the following types: `float32`, `float64`,\n`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n`complex128`, `qint8`, `quint8`, `qint32`, `half`."
    type_attr: "T"
  }
  input_arg {
    name: "axis"
    description: "A `Tensor` of type `int32` (default: 0). Must be in the range\n`[-rank(x), rank(x))`."
    type_attr: "Tidx"
  }
  output_arg {
    name: "out"
    type_attr: "T"
  }
  attr {
    name: "exclusive"
    type: "bool"
    default_value {
      b: false
    }
    description: "If `True`, perform exclusive cumsum."
  }
  attr {
    name: "reverse"
    type: "bool"
    default_value {
      b: false
    }
    description: "A `bool` (default: False)."
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "Tidx"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Compute the cumulative sum of the tensor `x` along `axis`."
  description: "By default, this op performs an inclusive cumsum, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumsum([a, b, c])  # => [a, a + b, a + b + c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumsum is\nperformed instead:\n\n```python\ntf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumsum is performed in the\nopposite direction:\n\n```python\ntf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]\n```"
}
op {
  name: "DatasetToSingleElement"
  input_arg {
    name: "dataset"
    description: "A handle to a dataset that contains a single element."
    type: DT_VARIANT
  }
  output_arg {
    name: "components"
    description: "The components of the single element of `input`."
    type_list_attr: "output_types"
  }
  attr {
    name: "output_types"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "output_shapes"
    type: "list(shape)"
    has_minimum: true
    minimum: 1
  }
  summary: "Outputs the single element from the given dataset."
}
op {
  name: "DebugGradientIdentity"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  summary: "Identity op for gradient debugging."
  description: "This op is hidden from public in Python. It is used by TensorFlow Debugger to\nregister gradient tensors for gradient debugging."
  allows_uninitialized_input: true
}
op {
  name: "DebugIdentity"
  input_arg {
    name: "input"
    description: "Input tensor, non-Reference type."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "Output tensor that equals the input tensor."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "device_name"
    type: "string"
    default_value {
      s: ""
    }
  }
  attr {
    name: "tensor_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "Name of the input tensor."
  }
  attr {
    name: "debug_urls"
    type: "list(string)"
    default_value {
      list {
      }
    }
    description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011"
  }
  attr {
    name: "gated_grpc"
    type: "bool"
    default_value {
      b: false
    }
    description: "Whether this op will be gated. If any of the debug_urls of this\ndebug node is of the grpc:// scheme, when the value of this attribute is set\nto True, the data will not actually be sent via the grpc stream unless this\ndebug op has been enabled at the debug_url. If all of the debug_urls of this\ndebug node are of the grpc:// scheme and the debug op is enabled at none of\nthem, the output will be an empty Tensor."
  }
  summary: "Debug Identity Op."
  description: "Provides an identity mapping of the non-Ref type input tensor for debugging."
  allows_uninitialized_input: true
}
op {
  name: "DebugNanCount"
  input_arg {
    name: "input"
    description: "Input tensor, non-Reference type."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "An integer output tensor that is the number of NaNs in the input."
    type: DT_INT64
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "device_name"
    type: "string"
    default_value {
      s: ""
    }
  }
  attr {
    name: "tensor_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "Name of the input tensor."
  }
  attr {
    name: "debug_urls"
    type: "list(string)"
    default_value {
      list {
      }
    }
    description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011."
  }
  attr {
    name: "gated_grpc"
    type: "bool"
    default_value {
      b: false
    }
    description: "Whether this op will be gated. If any of the debug_urls of this\ndebug node is of the grpc:// scheme, when the value of this attribute is set\nto True, the data will not actually be sent via the grpc stream unless this\ndebug op has been enabled at the debug_url. If all of the debug_urls of this\ndebug node are of the grpc:// scheme and the debug op is enabled at none of\nthem, the output will be an empty Tensor."
  }
  summary: "Debug NaN Value Counter Op"
  description: "Counts number of NaNs in the input tensor, for debugging."
  allows_uninitialized_input: true
}
op {
  name: "DebugNumericSummary"
  input_arg {
    name: "input"
    description: "Input tensor, non-Reference type, float or double."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "A double tensor of shape [14 + nDimensions], where nDimensions is the\n  the number of dimensions of the tensor\'s shape. The elements of output are:\n  [0]: is initialized (1.0) or not (0.0).\n  [1]: total number of elements\n  [2]: NaN element count\n  [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\n    default.\n  [4]: negative element count (excluding -inf), if lower_bound is the default\n    -inf. Otherwise, this is the count of elements > lower_bound and < 0.\n  [5]: zero element count\n  [6]: positive element count (excluding +inf), if upper_bound is the default\n    -inf. Otherwise, this is the count of elements < upper_bound and > 0.\n  [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by\n    default.\nOutput elements [1:8] are all zero, if the tensor is uninitialized.\n  [8]: minimum of all non-inf and non-NaN elements.\n       If uninitialized or no such element exists: +inf.\n  [9]: maximum of all non-inf and non-NaN elements.\n       If uninitialized or no such element exists: -inf.\n  [10]: mean of all non-inf and non-NaN elements.\n        If uninitialized or no such element exists: NaN.\n  [11]: variance of all non-inf and non-NaN elements.\n        If uninitialized or no such element exists: NaN.\n  [12]: Data type of the tensor encoded as an enum integer. See the DataType\n        proto for more details.\n  [13]: Number of dimensions of the tensor (ndims).\n  [14+]: Sizes of the dimensions."
    type: DT_DOUBLE
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "device_name"
    type: "string"
    default_value {
      s: ""
    }
  }
  attr {
    name: "tensor_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "Name of the input tensor."
  }
  attr {
    name: "debug_urls"
    type: "list(string)"
    default_value {
      list {
      }
    }
    description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011"
  }
  attr {
    name: "lower_bound"
    type: "float"
    default_value {
      f: -inf
    }
    description: "(float) The lower bound <= which values will be included in the\ngeneralized -inf count. Default: -inf."
  }
  attr {
    name: "upper_bound"
    type: "float"
    default_value {
      f: inf
    }
    description: "(float) The upper bound >= which values will be included in the\ngeneralized +inf count. Default: +inf."
  }
  attr {
    name: "mute_if_healthy"
    type: "bool"
    default_value {
      b: false
    }
    description: "(bool) Do not send data to the debug URLs unless at least one\nof elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\ninf counts) is non-zero."
  }
  attr {
    name: "gated_grpc"
    type: "bool"
    default_value {
      b: false
    }
    description: "Whether this op will be gated. If any of the debug_urls of this\ndebug node is of the grpc:// scheme, when the value of this attribute is set\nto True, the data will not actually be sent via the grpc stream unless this\ndebug op has been enabled at the debug_url. If all of the debug_urls of this\ndebug node are of the grpc:// scheme and the debug op is enabled at none of\nthem, the output will be an empty Tensor."
  }
  summary: "Debug Numeric Summary Op."
  description: "Provide a basic summary of numeric value types, range and distribution."
  allows_uninitialized_input: true
}
op {
  name: "DecodeAndCropJpeg"
  input_arg {
    name: "contents"
    description: "0-D.  The JPEG-encoded image."
    type: DT_STRING
  }
  input_arg {
    name: "crop_window"
    description: "1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width]."
    type: DT_INT32
  }
  output_arg {
    name: "image"
    description: "3-D with shape `[height, width, channels]`.."
    type: DT_UINT8
  }
  attr {
    name: "channels"
    type: "int"
    default_value {
      i: 0
    }
    description: "Number of color channels for the decoded image."
  }
  attr {
    name: "ratio"
    type: "int"
    default_value {
      i: 1
    }
    description: "Downscaling ratio."
  }
  attr {
    name: "fancy_upscaling"
    type: "bool"
    default_value {
      b: true
    }
    description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)."
  }
  attr {
    name: "try_recover_truncated"
    type: "bool"
    default_value {
      b: false
    }
    description: "If true try to recover an image from truncated input."
  }
  attr {
    name: "acceptable_fraction"
    type: "float"
    default_value {
      f: 1
    }
    description: "The minimum required fraction of lines before a truncated\ninput is accepted."
  }
  attr {
    name: "dct_method"
    type: "string"
    default_value {
      s: ""
    }
    description: "string specifying a hint about the algorithm used for\ndecompression.  Defaults to \"\" which maps to a system-specific\ndefault.  Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"].  The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)"
  }
  summary: "Decode and Crop a JPEG-encoded image to a uint8 tensor."
  description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n*   0: Use the number of channels in the JPEG-encoded image.\n*   1: output a grayscale image.\n*   3: output an RGB image.\n\nIf needed, the JPEG-encoded image is transformed to match the requested number\nof color channels.\n\nThe attr `ratio` allows downscaling the image by an integer factor during\ndecoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than\ndownscaling the image later.\n\n\nIt is equivalent to a combination of decode and crop, but much faster by only\ndecoding partial jpeg image."
}
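# A brief usage sketch. It assumes the op is exposed in the Python API as
# `tf.image.decode_and_crop_jpeg`; the endpoint name and the file path are
# illustrative assumptions, not part of this file.
#
#   import tensorflow as tf
#
#   contents = tf.read_file("/path/to/image.jpg")
#   # crop_window is [crop_y, crop_x, crop_height, crop_width].
#   image = tf.image.decode_and_crop_jpeg(contents, [10, 20, 100, 200], channels=3)
#   # `image` has shape [100, 200, 3]; only the cropped region is decoded.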
op {
  name: "DecodeBase64"
  input_arg {
    name: "input"
    description: "Base64 strings to decode."
    type: DT_STRING
  }
  output_arg {
    name: "output"
    description: "Decoded strings."
    type: DT_STRING
  }
  summary: "Decode web-safe base64-encoded strings."
  description: "Input may or may not have padding at the end. See EncodeBase64 for padding.\nWeb-safe means that input must use - and _ instead of + and /."
}
op {
  name: "DecodeBmp"
  input_arg {
    name: "contents"
    description: "0-D.  The BMP-encoded image."
    type: DT_STRING
  }
  output_arg {
    name: "image"
    description: "3-D with shape `[height, width, channels]`. RGB order"
    type: DT_UINT8
  }
  attr {
    name: "channels"
    type: "int"
    default_value {
      i: 0
    }
  }
  summary: "Decode the first frame of a BMP-encoded image to a uint8 tensor."
  description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n*   0: Use the number of channels in the BMP-encoded image.\n*   3: output an RGB image.\n*   4: output an RGBA image."
}
op {
  name: "DecodeCSV"
  input_arg {
    name: "records"
    description: "Each string is a record/row in the csv and all records should have\nthe same format."
    type: DT_STRING
  }
  input_arg {
    name: "record_defaults"
    description: "One tensor per column of the input record, with either a\nscalar default value for that column or empty if the column is required."
    type_list_attr: "OUT_TYPE"
  }
  output_arg {
    name: "output"
    description: "Each tensor will have the same shape as records."
    type_list_attr: "OUT_TYPE"
  }
  attr {
    name: "OUT_TYPE"
    type: "list(type)"
    has_minimum: true
    minimum: 1
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_STRING
      }
    }
  }
  attr {
    name: "field_delim"
    type: "string"
    default_value {
      s: ","
    }
    description: "char delimiter to separate fields in a record."
  }
  attr {
    name: "use_quote_delim"
    type: "bool"
    default_value {
      b: true
    }
    description: "If false, treats double quotation marks as regular\ncharacters inside of the string fields (ignoring RFC 4180, Section 2,\nBullet 5)."
  }
  attr {
    name: "na_value"
    type: "string"
    default_value {
      s: ""
    }
    description: "Additional string to recognize as NA/NaN."
  }
  summary: "Convert CSV records to tensors. Each column maps to one tensor."
  description: "RFC 4180 format is expected for the CSV records.\n(https://tools.ietf.org/html/rfc4180)\nNote that we allow leading and trailing spaces with int or float field."
}
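# A brief usage sketch, assuming the op is exposed in Python as `tf.decode_csv`
# (the values below are illustrative):
#
#   import tensorflow as tf
#
#   records = tf.constant(["1,2.5,foo", "4,0.1,bar"])
#   # One tensor per column; each default also fixes the column's output dtype.
#   record_defaults = [[0], [0.0], [""]]
#   ints, floats, strings = tf.decode_csv(records, record_defaults=record_defaults)
#   # ints => [1, 4], floats => [2.5, 0.1], strings => ["foo", "bar"]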
op {
  name: "DecodeGif"
  input_arg {
    name: "contents"
    description: "0-D.  The GIF-encoded image."
    type: DT_STRING
  }
  output_arg {
    name: "image"
    description: "4-D with shape `[num_frames, height, width, 3]`. RGB order"
    type: DT_UINT8
  }
  summary: "Decode the first frame of a GIF-encoded image to a uint8 tensor."
  description: "GIF with frame or transparency compression are not supported\nconvert animated GIF from compressed to uncompressed by:\n\n    convert $src.gif -coalesce $dst.gif\n\nThis op also supports decoding JPEGs and PNGs, though it is cleaner to use\n`tf.image.decode_image`."
}
op {
  name: "DecodeJSONExample"
  input_arg {
    name: "json_examples"
    description: "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto."
    type: DT_STRING
  }
  output_arg {
    name: "binary_examples"
    description: "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`."
    type: DT_STRING
  }
  summary: "Convert JSON-encoded Example records to binary protocol buffer strings."
  description: "This op translates a tensor containing Example records, encoded using\nthe [standard JSON\nmapping](https://developers.google.com/protocol-buffers/docs/proto3#json),\ninto a tensor containing the same records encoded as binary protocol\nbuffers. The resulting tensor can then be fed to any of the other\nExample-parsing ops."
}
op {
  name: "DecodeJpeg"
  input_arg {
    name: "contents"
    description: "0-D.  The JPEG-encoded image."
    type: DT_STRING
  }
  output_arg {
    name: "image"
    description: "3-D with shape `[height, width, channels]`.."
    type: DT_UINT8
  }
  attr {
    name: "channels"
    type: "int"
    default_value {
      i: 0
    }
    description: "Number of color channels for the decoded image."
  }
  attr {
    name: "ratio"
    type: "int"
    default_value {
      i: 1
    }
    description: "Downscaling ratio."
  }
  attr {
    name: "fancy_upscaling"
    type: "bool"
    default_value {
      b: true
    }
    description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)."
  }
  attr {
    name: "try_recover_truncated"
    type: "bool"
    default_value {
      b: false
    }
    description: "If true try to recover an image from truncated input."
  }
  attr {
    name: "acceptable_fraction"
    type: "float"
    default_value {
      f: 1
    }
    description: "The minimum required fraction of lines before a truncated\ninput is accepted."
  }
  attr {
    name: "dct_method"
    type: "string"
    default_value {
      s: ""
    }
    description: "string specifying a hint about the algorithm used for\ndecompression.  Defaults to \"\" which maps to a system-specific\ndefault.  Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"].  The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)"
  }
  summary: "Decode a JPEG-encoded image to a uint8 tensor."
  description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n*   0: Use the number of channels in the JPEG-encoded image.\n*   1: output a grayscale image.\n*   3: output an RGB image.\n\nIf needed, the JPEG-encoded image is transformed to match the requested number\nof color channels.\n\nThe attr `ratio` allows downscaling the image by an integer factor during\ndecoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than\ndownscaling the image later.\n\n\nThis op also supports decoding PNGs and non-animated GIFs since the interface is\nthe same, though it is cleaner to use `tf.image.decode_image`."
}
op {
  name: "DecodePng"
  input_arg {
    name: "contents"
    description: "0-D.  The PNG-encoded image."
    type: DT_STRING
  }
  output_arg {
    name: "image"
    description: "3-D with shape `[height, width, channels]`."
    type_attr: "dtype"
  }
  attr {
    name: "channels"
    type: "int"
    default_value {
      i: 0
    }
    description: "Number of color channels for the decoded image."
  }
  attr {
    name: "dtype"
    type: "type"
    default_value {
      type: DT_UINT8
    }
    allowed_values {
      list {
        type: DT_UINT8
        type: DT_UINT16
      }
    }
  }
  summary: "Decode a PNG-encoded image to a uint8 or uint16 tensor."
  description: "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n*   0: Use the number of channels in the PNG-encoded image.\n*   1: output a grayscale image.\n*   3: output an RGB image.\n*   4: output an RGBA image.\n\nIf needed, the PNG-encoded image is transformed to match the requested number\nof color channels.\n\nThis op also supports decoding JPEGs and non-animated GIFs since the interface\nis the same, though it is cleaner to use `tf.image.decode_image`."
}
op {
  name: "DecodeRaw"
  input_arg {
    name: "bytes"
    description: "All the elements must have the same length."
    type: DT_STRING
  }
  output_arg {
    name: "output"
    description: "A Tensor with one more dimension than the input `bytes`.  The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`."
    type_attr: "out_type"
  }
  attr {
    name: "out_type"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_UINT16
        type: DT_UINT8
        type: DT_INT16
        type: DT_INT8
        type: DT_INT64
      }
    }
  }
  attr {
    name: "little_endian"
    type: "bool"
    default_value {
      b: true
    }
    description: "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`."
  }
  summary: "Reinterpret the bytes of a string as a vector of numbers."
}
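# A brief usage sketch, assuming the op is exposed in Python as `tf.decode_raw`:
#
#   import tensorflow as tf
#
#   # Four little-endian bytes reinterpreted as a single int32:
#   # 0x04030201 = 67305985.
#   raw = tf.constant(["\x01\x02\x03\x04"])
#   values = tf.decode_raw(raw, out_type=tf.int32, little_endian=True)
#   # values => [[67305985]]; the output gains one dimension of size
#   # len(bytes) / sizeof(out_type) = 4 / 4 = 1.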
op {
  name: "DecodeWav"
  input_arg {
    name: "contents"
    description: "The WAV-encoded audio, usually from a file."
    type: DT_STRING
  }
  output_arg {
    name: "audio"
    description: "2-D with shape `[length, channels]`."
    type: DT_FLOAT
  }
  output_arg {
    name: "sample_rate"
    description: "Scalar holding the sample rate found in the WAV header."
    type: DT_INT32
  }
  attr {
    name: "desired_channels"
    type: "int"
    default_value {
      i: -1
    }
    description: "Number of sample channels wanted."
  }
  attr {
    name: "desired_samples"
    type: "int"
    default_value {
      i: -1
    }
    description: "Length of audio requested."
  }
  summary: "Decode a 16-bit PCM WAV file to a float tensor."
  description: "The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.\n\nWhen desired_channels is set, if the input contains fewer channels than this\nthen the last channel will be duplicated to give the requested number, else if\nthe input has more channels than requested then the additional channels will be\nignored.\n\nIf desired_samples is set, then the audio will be cropped or padded with zeroes\nto the requested length.\n\nThe first output contains a Tensor with the content of the audio samples. The\nlowest dimension will be the number of channels, and the second will be the\nnumber of samples. For example, a ten-sample-long stereo WAV file should give an\noutput shape of [10, 2]."
}
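# A brief usage sketch. The Python endpoint name varies by release; it is
# assumed here to be `tf.audio.decode_wav` (as in later TensorFlow versions),
# and the file path is illustrative:
#
#   import tensorflow as tf
#
#   contents = tf.io.read_file("/path/to/speech.wav")
#   audio, sample_rate = tf.audio.decode_wav(
#       contents, desired_channels=1, desired_samples=16000)
#   # `audio` has shape [16000, 1] with float values in [-1.0, 1.0].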
op {
  name: "DeleteSessionTensor"
  input_arg {
    name: "handle"
    description: "The handle for a tensor stored in the session state."
    type: DT_STRING
  }
  summary: "Delete the tensor specified by its handle in the session."
}
op {
  name: "DenseToDenseSetOperation"
  input_arg {
    name: "set1"
    description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored."
    type_attr: "T"
  }
  input_arg {
    name: "set2"
    description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.\nDimension `n` contains values in a set, duplicates are allowed but ignored."
    type_attr: "T"
  }
  output_arg {
    name: "result_indices"
    description: "2D indices of a `SparseTensor`."
    type: DT_INT64
  }
  output_arg {
    name: "result_values"
    description: "1D values of a `SparseTensor`."
    type_attr: "T"
  }
  output_arg {
    name: "result_shape"
    description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions."
    type: DT_INT64
  }
  attr {
    name: "set_operation"
    type: "string"
  }
  attr {
    name: "validate_indices"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_UINT16
        type: DT_STRING
      }
    }
  }
  summary: "Applies set operation along last dimension of 2 `Tensor` inputs."
  description: "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`."
}
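# A brief usage sketch, assuming the op is reached through the Python wrapper
# `tf.sets.set_intersection` (one of the set-operation helpers):
#
#   import tensorflow as tf
#
#   set1 = tf.constant([[1, 2, 3], [4, 5, 6]])   # sets live along the last dim
#   set2 = tf.constant([[2, 3, 9], [5, 5, 5]])
#   result = tf.sets.set_intersection(set1, set2)  # a SparseTensor
#   # Densified: [[2, 3], [5, 0]] with result_shape [2, 2]
#   # (row 0: the intersection of {1,2,3} and {2,3,9} is {2,3};
#   #  row 1: the intersection of {4,5,6} and {5} is {5}).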
op {
  name: "DenseToSparseBatchDataset"
  input_arg {
    name: "input_dataset"
    description: "A handle to an input dataset. Must have a single component."
    type: DT_VARIANT
  }
  input_arg {
    name: "batch_size"
    description: "A scalar representing the number of elements to accumulate in a\nbatch."
    type: DT_INT64
  }
  input_arg {
    name: "row_shape"
    description: "A vector representing the dense shape of each row in the produced\nSparseTensor. The shape may be partially specified, using `-1` to indicate\nthat a particular dimension should use the maximum size of all batch elements."
    type: DT_INT64
  }
  output_arg {
    name: "handle"
    type: DT_VARIANT
  }
  attr {
    name: "output_types"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "output_shapes"
    type: "list(shape)"
    has_minimum: true
    minimum: 1
  }
  summary: "Creates a dataset that yields a SparseTensor for each element of the input."
}
op {
  name: "DenseToSparseSetOperation"
  input_arg {
    name: "set1"
    description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored."
    type_attr: "T"
  }
  input_arg {
    name: "set2_indices"
    description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder."
    type: DT_INT64
  }
  input_arg {
    name: "set2_values"
    description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder."
    type_attr: "T"
  }
  input_arg {
    name: "set2_shape"
    description: "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the\nmax set size across `n-1` dimensions."
    type: DT_INT64
  }
  output_arg {
    name: "result_indices"
    description: "2D indices of a `SparseTensor`."
    type: DT_INT64
  }
  output_arg {
    name: "result_values"
    description: "1D values of a `SparseTensor`."
    type_attr: "T"
  }
  output_arg {
    name: "result_shape"
    description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions."
    type: DT_INT64
  }
  attr {
    name: "set_operation"
    type: "string"
  }
  attr {
    name: "validate_indices"
    type: "bool"
    default_value {
      b: true
    }
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_INT8
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_UINT16
        type: DT_STRING
      }
    }
  }
  summary: "Applies set operation along last dimension of `Tensor` and `SparseTensor`."
  description: "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nInput `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\nand `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\nas `set1`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set2`\nindices.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`."
}
op {
  name: "DepthToSpace"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "block_size"
    type: "int"
    description: "The size of the spatial block, same as in Space2Depth."
    has_minimum: true
    minimum: 2
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
        s: "NCHW_VECT_C"
      }
    }
  }
  summary: "DepthToSpace for tensors of type T."
  description: "Rearranges data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically,\nthis op outputs a copy of the input tensor where values from the `depth`\ndimension are moved in spatial blocks to the `height` and `width` dimensions.\nThe attr `block_size` indicates the input block size and how the data is moved.\n\n  * Chunks of data of size `block_size * block_size` from depth are rearranged\n    into non-overlapping blocks of size `block_size x block_size`\n  * The width the output tensor is `input_depth * block_size`, whereas the\n    height is `input_height * block_size`.\n  * The Y, X coordinates within each block of the output image are determined\n    by the high order component of the input channel index.\n  * The depth of the input tensor must be divisible by\n    `block_size * block_size`.\n\nThe `data_format` attr specifies the layout of the input and output tensors\nwith the following options:\n  \"NHWC\": `[ batch, height, width, channels ]`\n  \"NCHW\": `[ batch, channels, height, width ]`\n  \"NCHW_VECT_C\":\n      `qint8 [ batch, channels / 4, height, width, channels % 4 ]`\n\nIt is useful to consider the operation as transforming a 6-D Tensor.\ne.g. for data_format = NHWC,\n     Each element in the input tensor can be specified via 6 coordinates,\n     ordered by decreasing memory layout significance as:\n     n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates\n                        within the input image, bX, bY means coordinates\n                        within the output block, oC means output channels).\n     The output would be the input transposed to the following layout:\n     n,iY,bY,iX,bX,oC\n\nThis operation is useful for resizing the activations between convolutions\n(but keeping all data), e.g. instead of pooling. It is also useful for training\npurely convolutional models.\n\nFor example, given an input of shape `[1, 1, 1, 4]`, data_format = \"NHWC\" and\nblock_size = 2:\n\n```\nx = [[[[1, 2, 3, 4]]]]\n\n```\n\nThis operation will output a tensor of shape `[1, 2, 2, 1]`:\n\n```\n   [[[[1], [2]],\n     [[3], [4]]]]\n```\n\nHere, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,\nthe corresponding output will have 2x2 elements and will have a depth of\n1 channel (1 = `4 / (block_size * block_size)`).\nThe output element shape is `[2, 2, 1]`.\n\nFor an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.\n\n```\nx = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n```\n\nThis operation, for block size of 2, will return the following tensor of shape\n`[1, 2, 2, 3]`\n\n```\n   [[[[1, 2, 3], [4, 5, 6]],\n     [[7, 8, 9], [10, 11, 12]]]]\n\n```\n\nSimilarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:\n\n```\nx =  [[[[1, 2, 3, 4],\n       [5, 6, 7, 8]],\n      [[9, 10, 11, 12],\n       [13, 14, 15, 16]]]]\n```\n\nthe operator will return the following tensor of shape `[1 4 4 1]`:\n\n```\nx = [[[ [1],   [2],  [5],  [6]],\n      [ [3],   [4],  [7],  [8]],\n      [ [9],  [10], [13],  [14]],\n      [ [11], [12], [15],  [16]]]]\n\n```"
}
op {
  name: "DepthwiseConv2dNative"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D of length 4.  The stride of the sliding window for each dimension\nof `input`."
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors."
  description: "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, channel_multiplier]`, containing\n`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies\na different filter to each input channel (expanding from 1 channel to\n`channel_multiplier` channels for each), then concatenates the results\ntogether. Thus, the output has `in_channels * channel_multiplier` channels.\n\n```\nfor k in 0..in_channels-1\n  for q in 0..channel_multiplier-1\n    output[b, i, j, k * channel_multiplier + q] =\n      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *\n                        filter[di, dj, k, q]\n```\n\nMust have `strides[0] = strides[3] = 1`.  For the most common case of the same\nhorizontal and vertices strides, `strides = [1, stride, stride, 1]`."
}
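# A brief usage sketch via the standard Python wrapper `tf.nn.depthwise_conv2d`
# (an assumption about the exposed API):
#
#   import tensorflow as tf
#
#   x = tf.random_normal([1, 8, 8, 3])   # [batch, height, width, in_channels]
#   f = tf.random_normal([3, 3, 3, 2])   # [fh, fw, in_channels, channel_multiplier]
#   y = tf.nn.depthwise_conv2d(x, f, strides=[1, 1, 1, 1], padding="SAME")
#   # y has shape [1, 8, 8, 6]: in_channels * channel_multiplier = 3 * 2 channels.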
op {
  name: "DepthwiseConv2dNativeBackpropFilter"
  input_arg {
    name: "input"
    description: "4-D with shape based on `data_format`.  For example, if\n`data_format` is \'NHWC\' then `input` is a 4-D `[batch, in_height,\nin_width, in_channels]` tensor."
    type_attr: "T"
  }
  input_arg {
    name: "filter_sizes"
    description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor."
    type: DT_INT32
  }
  input_arg {
    name: "out_backprop"
    description: "4-D with shape  based on `data_format`.\nFor example, if `data_format` is \'NHWC\' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.\nthe `filter` input of the convolution."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of the input\nof the convolution."
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Computes the gradients of depthwise convolution with respect to the filter."
}
op {
  name: "DepthwiseConv2dNativeBackpropInput"
  input_arg {
    name: "input_sizes"
    description: "An integer vector representing the shape of `input`, based\non `data_format`.  For example, if `data_format` is \'NHWC\' then\n `input` is a 4-D `[batch, height, width, channels]` tensor."
    type: DT_INT32
  }
  input_arg {
    name: "filter"
    description: "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "4-D with shape  based on `data_format`.\nFor example, if `data_format` is \'NHWC\' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "4-D with shape according to `data_format`.  For example, if\n`data_format` is \'NHWC\', output shape is `[batch, in_height,\nin_width, in_channels]`.  Gradient w.r.t. the input of the\nconvolution."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of the input\nof the convolution."
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  attr {
    name: "data_format"
    type: "string"
    default_value {
      s: "NHWC"
    }
    description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n    [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n    [batch, channels, height, width]."
    allowed_values {
      list {
        s: "NHWC"
        s: "NCHW"
      }
    }
  }
  summary: "Computes the gradients of depthwise convolution with respect to the input."
}
op {
  name: "Dequantize"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  input_arg {
    name: "min_range"
    description: "The minimum scalar value possibly produced for the input."
    type: DT_FLOAT
  }
  input_arg {
    name: "max_range"
    description: "The maximum scalar value possibly produced for the input."
    type: DT_FLOAT
  }
  output_arg {
    name: "output"
    type: DT_FLOAT
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT16
        type: DT_QUINT16
        type: DT_QINT32
      }
    }
  }
  attr {
    name: "mode"
    type: "string"
    default_value {
      s: "MIN_COMBINED"
    }
    allowed_values {
      list {
        s: "MIN_COMBINED"
        s: "MIN_FIRST"
        s: "SCALED"
      }
    }
  }
  summary: "Dequantize the \'input\' tensor into a float Tensor."
  description: "[min_range, max_range] are scalar floats that specify the range for\nthe \'input\' data. The \'mode\' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents.\n\nIn \'MIN_COMBINED\' mode, each value of the tensor will undergo the following:\n\n```\nif T == qint8, in[i] += (range(T) + 1)/ 2.0\nout[i] = min_range + (in[i]* (max_range - min_range) / range(T))\n```\nhere `range(T) = numeric_limits::max() - numeric_limits::min()`\n\n*MIN_COMBINED Mode Example*\n\nIf the input comes from a QuantizedRelu6, the output type is\nquint8 (range of 0-255) but the possible range of QuantizedRelu6 is\n0-6.  The min_range and max_range values are therefore 0.0 and 6.0.\nDequantize on quint8 will take each value, cast to float, and multiply\nby 6 / 255.\nNote that if quantizedtype is qint8, the operation will additionally add\neach value by 128 prior to casting.\n\nIf the mode is \'MIN_FIRST\', then this approach is used:\n\n```c++\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = range / num_discrete_values\nconst double offset_input = static_cast(input) - lowest_quantized;\nresult = range_min + ((input - numeric_limits::min()) * range_scale)\n```\n\n*SCALED mode Example*\n\n`SCALED` mode matches the quantization approach used in\n`QuantizeAndDequantize{V2|V3}`.\n\nIf the mode is `SCALED`, we do not use the full range of the output type,\nchoosing to elide the lowest possible value for symmetry (e.g., output range is\n-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to\n0.\n\nWe first find the range of values in our tensor. The\nrange we use is always centered on 0, so we find m such that\n```c++\n  m = max(abs(input_min), abs(input_max))\n```\n\nOur input tensor range is then `[-m, m]`.\n\nNext, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.\nIf T is signed, this is\n```\n  num_bits = sizeof(T) * 8\n  [min_fixed, max_fixed] =\n      [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]\n```\n\nOtherwise, if T is unsigned, the fixed-point range is\n```\n  [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]\n```\n\nFrom this we compute our scaling factor, s:\n```c++\n  s = (2 * m) / (max_fixed - min_fixed)\n```\n\nNow we can dequantize the elements of our tensor:\n```c++\nresult = input * s\n```"
}
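# A worked numeric check of MIN_COMBINED mode for quint8, following the formula
# above (the numbers are illustrative):
#
#   # min_range = 0.0, max_range = 6.0, T = quint8 => range(T) = 255
#   # out[i] = min_range + in[i] * (max_range - min_range) / 255
#   # in = [0, 128, 255]  =>  out = [0.0, 3.0117..., 6.0]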
op {
  name: "DeserializeIterator"
  input_arg {
    name: "resource_handle"
    description: "A handle to an iterator resource."
    type: DT_RESOURCE
  }
  input_arg {
    name: "serialized"
    description: "A variant tensor storing the state of the iterator contained in the\nresource."
    type: DT_VARIANT
  }
  summary: "Converts the given variant tensor to an iterator and stores it in the given resource."
  is_stateful: true
}
op {
  name: "DeserializeManySparse"
  input_arg {
    name: "serialized_sparse"
    description: "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns."
    type: DT_STRING
  }
  output_arg {
    name: "sparse_indices"
    type: DT_INT64
  }
  output_arg {
    name: "sparse_values"
    type_attr: "dtype"
  }
  output_arg {
    name: "sparse_shape"
    type: DT_INT64
  }
  attr {
    name: "dtype"
    type: "type"
    description: "The `dtype` of the serialized `SparseTensor` objects."
  }
  summary: "Deserialize and concatenate `SparseTensors` from a serialized minibatch."
  description: "The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where\n`N` is the minibatch size and the rows correspond to packed outputs of\n`SerializeSparse`.  The ranks of the original `SparseTensor` objects\nmust all match.  When the final `SparseTensor` is created, it has rank one\nhigher than the ranks of the incoming `SparseTensor` objects\n(they have been concatenated along a new row dimension).\n\nThe output `SparseTensor` object\'s shape values for all dimensions but the\nfirst are the max across the input `SparseTensor` objects\' shape values\nfor the corresponding dimensions.  Its first shape value is `N`, the minibatch\nsize.\n\nThe input `SparseTensor` objects\' indices are assumed ordered in\nstandard lexicographic order.  If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the serialized input is a `[2 x 3]` matrix representing two\noriginal `SparseTensor` objects:\n\n    index = [ 0]\n            [10]\n            [20]\n    values = [1, 2, 3]\n    shape = [50]\n\nand\n\n    index = [ 2]\n            [10]\n    values = [4, 5]\n    shape = [30]\n\nthen the final deserialized `SparseTensor` will be:\n\n    index = [0  0]\n            [0 10]\n            [0 20]\n            [1  2]\n            [1 10]\n    values = [1, 2, 3, 4, 5]\n    shape = [2 50]"
}
op {
  name: "DestroyResourceOp"
  input_arg {
    name: "resource"
    description: "handle to the resource to delete."
    type: DT_RESOURCE
  }
  attr {
    name: "ignore_lookup_error"
    type: "bool"
    default_value {
      b: true
    }
    description: "whether to ignore the error when the resource\ndoesn\'t exist."
  }
  summary: "Deletes the resource specified by the handle."
  description: "All subsequent operations using the resource will result in a NotFound\nerror status."
  is_stateful: true
}
op {
  name: "DestroyTemporaryVariable"
  input_arg {
    name: "ref"
    description: "A reference to the temporary variable tensor."
    type_attr: "T"
    is_ref: true
  }
  output_arg {
    name: "value"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  attr {
    name: "var_name"
    type: "string"
    description: "Name of the temporary variable, usually the name of the matching\n\'TemporaryVariable\' op."
  }
  summary: "Destroys the temporary variable and returns its final value."
  description: "Sets output to the value of the Tensor pointed to by \'ref\', then destroys\nthe temporary variable called \'var_name\'.\nAll other uses of \'ref\' *must* have executed before this op.\nThis is typically achieved by chaining the ref through each assign op, or by\nusing control dependencies.\n\nOutputs the final value of the tensor pointed to by \'ref\'."
}
op {
  name: "Diag"
  input_arg {
    name: "diagonal"
    description: "Rank k tensor where k is at most 1."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Returns a diagonal tensor with a given diagonal values."
  description: "Given a `diagonal`, this operation returns a tensor with the `diagonal` and\neverything else padded with zeros. The diagonal is computed as follows:\n\nAssume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of\nrank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:\n\n`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.\n\nFor example:\n\n```\n# \'diagonal\' is [1, 2, 3, 4]\ntf.diag(diagonal) ==> [[1, 0, 0, 0]\n                       [0, 2, 0, 0]\n                       [0, 0, 3, 0]\n                       [0, 0, 0, 4]]\n```"
}
op {
  name: "DiagPart"
  input_arg {
    name: "input"
    description: "Rank k tensor where k is even and not zero."
    type_attr: "T"
  }
  output_arg {
    name: "diagonal"
    description: "The extracted diagonal."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Returns the diagonal part of the tensor."
  description: "This operation returns a tensor with the `diagonal` part\nof the `input`. The `diagonal` part is computed as follows:\n\nAssume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a\ntensor of rank `k` with dimensions `[D1,..., Dk]` where:\n\n`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.\n\nFor example:\n\n```\n# \'input\' is [[1, 0, 0, 0]\n              [0, 2, 0, 0]\n              [0, 0, 3, 0]\n              [0, 0, 0, 4]]\n\ntf.diag_part(input) ==> [1, 2, 3, 4]\n```"
}
op {
  name: "Digamma"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  output_arg {
    name: "y"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Computes Psi, the derivative of Lgamma (the log of the absolute value of"
  description: "`Gamma(x)`), element-wise."
}
op {
  name: "Dilation2D"
  input_arg {
    name: "input"
    description: "4-D with shape `[batch, in_height, in_width, depth]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "3-D with shape `[filter_height, filter_width, depth]`."
    type_attr: "T"
  }
  output_arg {
    name: "output"
    description: "4-D with shape `[batch, out_height, out_width, depth]`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_INT16
        type: DT_INT8
        type: DT_UINT16
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "rates"
    type: "list(int)"
    description: "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  summary: "Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors."
  description: "The `input` tensor has shape `[batch, in_height, in_width, depth]` and the\n`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each\ninput channel is processed independently of the others with its own structuring\nfunction. The `output` tensor has shape\n`[batch, out_height, out_width, depth]`. The spatial dimensions of the output\ntensor depend on the `padding` algorithm. We currently only support the default\n\"NHWC\" `data_format`.\n\nIn detail, the grayscale morphological 2-D dilation is the max-sum correlation\n(for consistency with `conv2d`, we use unmirrored filters):\n\n    output[b, y, x, c] =\n       max_{dy, dx} input[b,\n                          strides[1] * y + rates[1] * dy,\n                          strides[2] * x + rates[2] * dx,\n                          c] +\n                    filter[dy, dx, c]\n\nMax-pooling is a special case when the filter has size equal to the pooling\nkernel size and contains all zeros.\n\nNote on duality: The dilation of `input` by the `filter` is equal to the\nnegation of the erosion of `-input` by the reflected `filter`."
}
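# A brief usage sketch via the standard Python wrapper `tf.nn.dilation2d`
# (an assumption about the exposed API):
#
#   import tensorflow as tf
#
#   x = tf.random_normal([1, 10, 10, 1])   # [batch, height, width, depth]
#   f = tf.zeros([3, 3, 1])                # all-zero filter
#   y = tf.nn.dilation2d(x, f, strides=[1, 1, 1, 1],
#                        rates=[1, 1, 1, 1], padding="SAME")
#   # With an all-zero filter this reduces to a 3x3 max pool, per the note above.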
op {
  name: "Dilation2DBackpropFilter"
  input_arg {
    name: "input"
    description: "4-D with shape `[batch, in_height, in_width, depth]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "3-D with shape `[filter_height, filter_width, depth]`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "4-D with shape `[batch, out_height, out_width, depth]`."
    type_attr: "T"
  }
  output_arg {
    name: "filter_backprop"
    description: "3-D with shape `[filter_height, filter_width, depth]`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_INT16
        type: DT_INT8
        type: DT_UINT16
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "rates"
    type: "list(int)"
    description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  summary: "Computes the gradient of morphological 2-D dilation with respect to the filter."
}
op {
  name: "Dilation2DBackpropInput"
  input_arg {
    name: "input"
    description: "4-D with shape `[batch, in_height, in_width, depth]`."
    type_attr: "T"
  }
  input_arg {
    name: "filter"
    description: "3-D with shape `[filter_height, filter_width, depth]`."
    type_attr: "T"
  }
  input_arg {
    name: "out_backprop"
    description: "4-D with shape `[batch, out_height, out_width, depth]`."
    type_attr: "T"
  }
  output_arg {
    name: "in_backprop"
    description: "4-D with shape `[batch, in_height, in_width, depth]`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_INT16
        type: DT_INT8
        type: DT_UINT16
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "strides"
    type: "list(int)"
    description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "rates"
    type: "list(int)"
    description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`."
    has_minimum: true
    minimum: 4
  }
  attr {
    name: "padding"
    type: "string"
    description: "The type of padding algorithm to use."
    allowed_values {
      list {
        s: "SAME"
        s: "VALID"
      }
    }
  }
  summary: "Computes the gradient of morphological 2-D dilation with respect to the input."
}
op {
  name: "Div"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "y"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_HALF
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_UINT8
        type: DT_INT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT32
        type: DT_INT64
        type: DT_COMPLEX64
        type: DT_COMPLEX128
      }
    }
  }
  summary: "Returns x / y element-wise."
  description: "*NOTE*: `Div` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)"
}
op {
  name: "DrawBoundingBoxes"
  input_arg {
    name: "images"
    description: "4-D with shape `[batch, height, width, depth]`. A batch of images."
    type_attr: "T"
  }
  input_arg {
    name: "boxes"
    description: "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes."
    type: DT_FLOAT
  }
  output_arg {
    name: "output"
    description: "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_FLOAT
    }
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_HALF
      }
    }
  }
  summary: "Draw bounding boxes on a batch of images."
  description: "Outputs a copy of `images` but draws on top of the pixels zero or more bounding\nboxes specified by the locations in `boxes`. The coordinates of the each\nbounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example, if an image is 100 x 200 pixels (height x width) and the bounding\nbox is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\nthe bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates).\n\nParts of the bounding box may fall outside the image."
}
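# A brief usage sketch via the standard Python wrapper
# `tf.image.draw_bounding_boxes` (an assumption about the exposed API):
#
#   import tensorflow as tf
#
#   images = tf.zeros([1, 100, 200, 3])            # [batch, height, width, depth]
#   boxes = tf.constant([[[0.1, 0.2, 0.5, 0.9]]])  # [batch, num_boxes, 4]
#   drawn = tf.image.draw_bounding_boxes(images, boxes)
#   # The box [y_min, x_min, y_max, x_max] spans (x, y) = (40, 10) to (180, 50).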
op {
  name: "DynamicPartition"
  input_arg {
    name: "data"
    type_attr: "T"
  }
  input_arg {
    name: "partitions"
    description: "Any shape.  Indices in the range `[0, num_partitions)`."
    type: DT_INT32
  }
  output_arg {
    name: "outputs"
    type_attr: "T"
    number_attr: "num_partitions"
  }
  attr {
    name: "num_partitions"
    type: "int"
    description: "The number of partitions to output."
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "T"
    type: "type"
  }
  summary: "Partitions `data` into `num_partitions` tensors using indices from `partitions`."
  description: "For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`\nbecomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`\nare placed in `outputs[i]` in lexicographic order of `js`, and the first\ndimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.\nIn detail,\n\n```python\n    outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]\n\n    outputs[i] = pack([data[js, ...] for js if partitions[js] == i])\n```\n\n`data.shape` must start with `partitions.shape`.\n\nFor example:\n\n```python\n    # Scalar partitions.\n    partitions = 1\n    num_partitions = 2\n    data = [10, 20]\n    outputs[0] = []  # Empty with shape [0, 2]\n    outputs[1] = [[10, 20]]\n\n    # Vector partitions.\n    partitions = [0, 0, 1, 1, 0]\n    num_partitions = 2\n    data = [10, 20, 30, 40, 50]\n    outputs[0] = [10, 20, 50]\n    outputs[1] = [30, 40]\n```\n\nSee `dynamic_stitch` for an example on how to merge partitions back.\n\n
\n\n
" } op { name: "DynamicStitch" input_arg { name: "indices" type: DT_INT32 number_attr: "N" } input_arg { name: "data" type_attr: "T" number_attr: "N" } output_arg { name: "merged" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } summary: "Interleave the values from the `data` tensors into a single tensor." description: "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] = data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices)] + constant\n\nValues are merged in order, so if an index appears in both `indices[m][i]` and\n`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the\nmerged result. If you do not need this guarantee, ParallelDynamicStitch might\nperform better on some devices.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated on the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # apply (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```\n\n
\n\n
" } op { name: "EditDistance" input_arg { name: "hypothesis_indices" description: "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix." type: DT_INT64 } input_arg { name: "hypothesis_values" description: "The values of the hypothesis list SparseTensor.\nThis is an N-length vector." type_attr: "T" } input_arg { name: "hypothesis_shape" description: "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector." type: DT_INT64 } input_arg { name: "truth_indices" description: "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix." type: DT_INT64 } input_arg { name: "truth_values" description: "The values of the truth list SparseTensor.\nThis is an M-length vector." type_attr: "T" } input_arg { name: "truth_shape" description: "truth indices, vector." type: DT_INT64 } output_arg { name: "output" description: "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n // hypothesis represents a 2x1 matrix with variable-length values:\n // (0,0) = [\"a\"]\n // (1,0) = [\"b\"]\n hypothesis_indices = [[0, 0, 0],\n [1, 0, 0]]\n hypothesis_values = [\"a\", \"b\"]\n hypothesis_shape = [2, 1, 1]\n\n // truth represents a 2x2 matrix with variable-length values:\n // (0,0) = []\n // (0,1) = [\"a\"]\n // (1,0) = [\"b\", \"c\"]\n // (1,1) = [\"a\"]\n truth_indices = [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n truth_values = [\"a\", \"b\", \"c\", \"a\"]\n truth_shape = [2, 2, 2]\n normalize = true\n\nThe output will be:\n\n // output is a 2x2 matrix with edit distances normalized by truth lengths.\n output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis" type: DT_FLOAT } attr { name: "normalize" type: "bool" default_value { b: true } description: "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:" } attr { name: "T" type: "type" } summary: "Computes the (possibly normalized) Levenshtein Edit Distance." description: "The inputs are variable-length sequences provided by SparseTensors\n (hypothesis_indices, hypothesis_values, hypothesis_shape)\nand\n (truth_indices, truth_values, truth_shape).\n\nThe inputs are:" } op { name: "Elu" input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise." description: "See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)\n](http://arxiv.org/abs/1511.07289)" } op { name: "EluGrad" input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Elu operation." type_attr: "T" } input_arg { name: "outputs" description: "The outputs of the corresponding Elu operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes gradients for the exponential linear (Elu) operation." } op { name: "EncodeBase64" input_arg { name: "input" description: "Strings to be encoded." type: DT_STRING } output_arg { name: "output" description: "Input strings encoded in base64." 
type: DT_STRING } attr { name: "pad" type: "bool" default_value { b: false } description: "Bool whether padding is applied at the ends." } summary: "Encode strings into web-safe base64 format." description: "Refer to the following article for more information on base64 format:\nen.wikipedia.org/wiki/Base64. Base64 strings may have padding with \'=\' at the\nend so that the encoded has length multiple of 4. See Padding section of the\nlink above.\n\nWeb-safe means that the encoder uses - and _ instead of + and /." } op { name: "EncodeJpeg" input_arg { name: "image" description: "3-D with shape `[height, width, channels]`." type: DT_UINT8 } output_arg { name: "contents" description: "0-D. JPEG-encoded image." type: DT_STRING } attr { name: "format" type: "string" default_value { s: "" } description: "Per pixel image format." allowed_values { list { s: "" s: "grayscale" s: "rgb" } } } attr { name: "quality" type: "int" default_value { i: 95 } description: "Quality of the compression from 0 to 100 (higher is better and slower)." } attr { name: "progressive" type: "bool" default_value { b: false } description: "If True, create a JPEG that loads progressively (coarse to fine)." } attr { name: "optimize_size" type: "bool" default_value { b: false } description: "If True, spend CPU/RAM to reduce size with no quality change." } attr { name: "chroma_downsampling" type: "bool" default_value { b: true } description: "See http://en.wikipedia.org/wiki/Chroma_subsampling." } attr { name: "density_unit" type: "string" default_value { s: "in" } description: "Unit used to specify `x_density` and `y_density`:\npixels per inch (`\'in\'`) or centimeter (`\'cm\'`)." allowed_values { list { s: "in" s: "cm" } } } attr { name: "x_density" type: "int" default_value { i: 300 } description: "Horizontal pixels per density unit." } attr { name: "y_density" type: "int" default_value { i: 300 } description: "Vertical pixels per density unit." } attr { name: "xmp_metadata" type: "string" default_value { s: "" } description: "If not empty, embed this XMP metadata in the image header." } summary: "JPEG-encode an image." description: "`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.\n\nThe attr `format` can be used to override the color format of the encoded\noutput. Values can be:\n\n* `\'\'`: Use a default format based on the number of channels in the image.\n* `grayscale`: Output a grayscale JPEG image. The `channels` dimension\n of `image` must be 1.\n* `rgb`: Output an RGB JPEG image. The `channels` dimension\n of `image` must be 3.\n\nIf `format` is not specified or is the empty string, a default format is picked\nin function of the number of channels in `image`:\n\n* 1: Output a grayscale image.\n* 3: Output an RGB image." } op { name: "EncodePng" input_arg { name: "image" description: "3-D with shape `[height, width, channels]`." type_attr: "T" } output_arg { name: "contents" description: "0-D. PNG-encoded image." type: DT_STRING } attr { name: "compression" type: "int" default_value { i: -1 } description: "Compression level." } attr { name: "T" type: "type" default_value { type: DT_UINT8 } allowed_values { list { type: DT_UINT8 type: DT_UINT16 } } } summary: "PNG-encode an image." description: "`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`\nwhere `channels` is:\n\n* 1: for grayscale.\n* 2: for grayscale + alpha.\n* 3: for RGB.\n* 4: for RGBA.\n\nThe ZLIB compression level, `compression`, can be -1 for the PNG-encoder\ndefault or a value from 0 to 9. 
9 is the highest compression level, generating\nthe smallest output, but is slower." } op { name: "EncodeWav" input_arg { name: "audio" description: "2-D with shape `[length, channels]`." type: DT_FLOAT } input_arg { name: "sample_rate" description: "Scalar containing the sample frequency." type: DT_INT32 } output_arg { name: "contents" description: "0-D. WAV-encoded file contents." type: DT_STRING } summary: "Encode audio data using the WAV file format." description: "This operation will generate a string suitable to be saved out to create a .wav\naudio file. It will be encoded in the 16-bit PCM format. It takes in float\nvalues in the range -1.0f to 1.0f, and any outside that value will be clamped to\nthat range.\n\n`audio` is a 2-D float Tensor of shape `[length, channels]`.\n`sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100)." } op { name: "Enter" input_arg { name: "data" description: "The tensor to be made available to the child frame." type_attr: "T" } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "frame_name" type: "string" description: "The name of the child frame." } attr { name: "is_constant" type: "bool" default_value { b: false } description: "If true, the output is constant within the child frame." } attr { name: "parallel_iterations" type: "int" default_value { i: 10 } description: "The number of iterations allowed to run in parallel." } summary: "Creates or finds a child frame, and makes `data` available to the child frame." description: "This op is used together with `Exit` to create loops in the graph.\nThe unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `output` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations` iterations\nare run in parallel in the child frame." } op { name: "Equal" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_QUINT8 type: DT_QINT8 type: DT_QINT32 type: DT_STRING type: DT_BOOL type: DT_COMPLEX128 } } } summary: "Returns the truth value of (x == y) element-wise." description: "*NOTE*: `Equal` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "Erf" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes the Gauss error function of `x` element-wise." } op { name: "Erfc" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes the complementary error function of `x` element-wise." } op { name: "Exit" input_arg { name: "data" description: "The tensor to be made available to the parent frame." type_attr: "T" } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Exits the current frame to its parent frame." description: "Exit makes its input `data` available to the parent frame." 
} op { name: "Exp" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes exponential of x element-wise. \\\\(y = e^x\\\\)." } op { name: "ExpandDims" input_arg { name: "input" type_attr: "T" } input_arg { name: "dim" description: "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`. Must be in the range\n`[-rank(input) - 1, rank(input)]`." type_attr: "Tdim" } output_arg { name: "output" description: "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tdim" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Inserts a dimension of 1 into a tensor\'s shape." description: "Given a tensor `input`, this operation inserts a dimension of 1 at the\ndimension index `dim` of `input`\'s shape. The dimension index `dim` starts at\nzero; if you specify a negative number for `dim` it is counted backward from\nthe end.\n\nThis operation is useful if you want to add a batch dimension to a single\nelement. For example, if you have a single image of shape `[height, width,\nchannels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,\nwhich will make the shape `[1, height, width, channels]`.\n\nOther examples:\n\n```\n# \'t\' is a tensor of shape [2]\nshape(expand_dims(t, 0)) ==> [1, 2]\nshape(expand_dims(t, 1)) ==> [2, 1]\nshape(expand_dims(t, -1)) ==> [2, 1]\n\n# \'t2\' is a tensor of shape [2, 3, 5]\nshape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]\nshape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]\nshape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]\n```\n\nThis operation requires that:\n\n`-1-input.dims() <= dim <= input.dims()`\n\nThis operation is related to `squeeze()`, which removes dimensions of\nsize 1." } op { name: "Expm1" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes exponential of x - 1 element-wise." description: "I.e., \\\\(y = (\\exp x) - 1\\\\)." } op { name: "ExtractGlimpse" input_arg { name: "input" description: "A 4-D float tensor of shape `[batch_size, height, width, channels]`." type: DT_FLOAT } input_arg { name: "size" description: "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract. The glimpse height must be specified first, following\nby the glimpse width." type: DT_INT32 } input_arg { name: "offsets" description: "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window." type: DT_FLOAT } output_arg { name: "glimpse" description: "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`." type: DT_FLOAT } attr { name: "centered" type: "bool" default_value { b: true } description: "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images." } attr { name: "normalized" type: "bool" default_value { b: true } description: "indicates if the offset coordinates are normalized." 
} attr { name: "uniform_noise" type: "bool" default_value { b: true } description: "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution." } summary: "Extracts a glimpse from the input tensor." description: "Returns a set of windows called glimpses extracted at location\n`offsets` from the input tensor. If the windows only partially\noverlaps the inputs, the non overlapping areas will be filled with\nrandom noise.\n\nThe result is a 4-D tensor of shape `[batch_size, glimpse_height,\nglimpse_width, channels]`. The channels and batch dimensions are the\nsame as that of the input tensor. The height and width of the output\nwindows are specified in the `size` parameter.\n\nThe argument `normalized` and `centered` controls how the windows are built:\n\n* If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n* If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n* If the coordinates are not normalized they are interpreted as\n numbers of pixels." } op { name: "ExtractImagePatches" input_arg { name: "images" description: "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`." type_attr: "T" } output_arg { name: "patches" description: "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension. Note\n`out_rows` and `out_cols` are the dimensions of the output patches." type_attr: "T" } attr { name: "ksizes" type: "list(int)" description: "The size of the sliding window for each dimension of `images`." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "1-D of length 4. How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`." has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" description: "1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`. This is equivalent to\n`rate` in dilated (a.k.a. Atrous) convolutions." has_minimum: true minimum: 4 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n```python\n ksizes = [1, ksize_rows, ksize_cols, 1]\n strides = [1, strides_rows, strides_cols, 1]\n rates = [1, rates_rows, rates_cols, 1]\n```" allowed_values { list { s: "SAME" s: "VALID" } } } summary: "Extract `patches` from `images` and put them in the \"depth\" output dimension." } op { name: "ExtractJpegShape" input_arg { name: "contents" description: "0-D. The JPEG-encoded image." type: DT_STRING } output_arg { name: "image_shape" description: "1-D. The image shape with format [height, width, channels]." 
type_attr: "output_type" } attr { name: "output_type" type: "type" default_value { type: DT_INT32 } description: "(Optional) The output type of the operation (int32 or int64).\nDefaults to int32." allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Extract the shape information of a JPEG-encoded image." description: "This op only parses the image header, so it is much faster than DecodeJpeg." } op { name: "FFT" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft\n@end_compatibility" type: DT_COMPLEX64 } summary: "Fast Fourier transform." description: "Computes the 1-dimensional discrete Fourier transform over the inner-most\ndimension of `input`." } op { name: "FFT2D" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft2\n@end_compatibility" type: DT_COMPLEX64 } summary: "2D fast Fourier transform." description: "Computes the 2-dimensional discrete Fourier transform over the inner-most\n2 dimensions of `input`." } op { name: "FFT3D" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fftn with 3 dimensions.\n@end_compatibility" type: DT_COMPLEX64 } summary: "3D fast Fourier transform." description: "Computes the 3-dimensional discrete Fourier transform over the inner-most 3\ndimensions of `input`." } op { name: "FIFOQueue" output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that produces elements in first-in first-out order." is_stateful: true } op { name: "FIFOQueueV2" output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." 
has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that produces elements in first-in first-out order." is_stateful: true } op { name: "Fact" output_arg { name: "fact" type: DT_STRING } summary: "Output a fact about factorials." } op { name: "FakeQuantWithMinMaxArgs" input_arg { name: "inputs" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "min" type: "float" default_value { f: -6 } } attr { name: "max" type: "float" default_value { f: 6 } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } summary: "Fake-quantize the \'inputs\' tensor, type float to \'outputs\' tensor of same type." description: "Attributes `[min; max]` define the clamping range for the `inputs` data.\n`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\nwhen `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\nthen de-quantized and output as floats in `[min; max]` interval.\n`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.\n\nQuantization is called fake since the output is still in floating point." } op { name: "FakeQuantWithMinMaxArgsGradient" input_arg { name: "gradients" description: "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation." type: DT_FLOAT } input_arg { name: "inputs" description: "Values passed as inputs to the FakeQuantWithMinMaxArgs operation." type: DT_FLOAT } output_arg { name: "backprops" description: "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs >= min && inputs <= max)`." type: DT_FLOAT } attr { name: "min" type: "float" default_value { f: -6 } } attr { name: "max" type: "float" default_value { f: 6 } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } summary: "Compute gradients for a FakeQuantWithMinMaxArgs operation." 
} op { name: "FakeQuantWithMinMaxVars" input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } summary: "Fake-quantize the \'inputs\' tensor of type float via global float scalars `min`" description: "and `max` to \'outputs\' tensor of same shape as `inputs`.\n\n`[min; max]` define the clamping range for the `inputs` data.\n`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\nwhen `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\nthen de-quantized and output as floats in `[min; max]` interval.\n`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.\n\nThis operation has a gradient and thus allows for training `min` and `max`\nvalues." } op { name: "FakeQuantWithMinMaxVarsGradient" input_arg { name: "gradients" description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation." type: DT_FLOAT } input_arg { name: "inputs" description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats." type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "backprops_wrt_input" description: "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs >= min && inputs <= max)`." type: DT_FLOAT } output_arg { name: "backprop_wrt_min" description: "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs < min))`." type: DT_FLOAT } output_arg { name: "backprop_wrt_max" description: "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs > max))`." type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } description: "The bitwidth of the quantization; between 2 and 8, inclusive." } attr { name: "narrow_range" type: "bool" default_value { b: false } description: "Whether to quantize into 2^num_bits - 1 distinct values." } summary: "Compute gradients for a FakeQuantWithMinMaxVars operation." } op { name: "FakeQuantWithMinMaxVarsPerChannel" input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } summary: "Fake-quantize the \'inputs\' tensor of type float and one of the shapes: `[d]`," description: "`[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`\nto \'outputs\' tensor of same shape as `inputs`.\n\n`[min; max]` define the clamping range for the `inputs` data.\n`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\nwhen `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\nthen de-quantized and output as floats in `[min; max]` interval.\n`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.\n\nThis operation has a gradient and thus allows for training `min` and `max`\nvalues." } op { name: "FakeQuantWithMinMaxVarsPerChannelGradient" input_arg { name: "gradients" description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`, `[b, h, w, d]`." 
type: DT_FLOAT } input_arg { name: "inputs" description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`." type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "backprops_wrt_input" description: "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n `gradients * (inputs >= min && inputs <= max)`." type: DT_FLOAT } output_arg { name: "backprop_wrt_min" description: "Backpropagated gradients w.r.t. min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs < min))`." type: DT_FLOAT } output_arg { name: "backprop_wrt_max" description: "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`." type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } description: "The bitwidth of the quantization; between 2 and 8, inclusive." } attr { name: "narrow_range" type: "bool" default_value { b: false } description: "Whether to quantize into 2^num_bits - 1 distinct values." } summary: "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation." } op { name: "FakeQueue" input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "handle" type: DT_STRING is_ref: true } summary: "Deprecated. Do not use." is_stateful: true } op { name: "Fill" input_arg { name: "dims" description: "1-D. Represents the shape of the output tensor." type: DT_INT32 } input_arg { name: "value" description: "0-D (scalar). Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } summary: "Creates a tensor filled with a scalar value." description: "This operation creates a tensor of shape `dims` and fills it with `value`.\n\nFor example:\n\n```\n# Output tensor has shape [2, 3].\nfill([2, 3], 9) ==> [[9, 9, 9]\n [9, 9, 9]]\n```" } op { name: "FilterDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" description: "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`." type_list_attr: "Targuments" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "predicate" type: "func" description: "A function returning a scalar boolean." } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset containing elements of `input_dataset` matching `predicate`." description: "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`." } op { name: "FixedLengthRecordDataset" input_arg { name: "filenames" description: "A scalar or a vector containing the name(s) of the file(s) to be\nread." type: DT_STRING } input_arg { name: "header_bytes" description: "A scalar representing the number of bytes to skip at the\nbeginning of a file." type: DT_INT64 } input_arg { name: "record_bytes" description: "A scalar representing the number of bytes in each record." type: DT_INT64 } input_arg { name: "footer_bytes" description: "A scalar representing the number of bytes to skip at the end\nof a file." 
type: DT_INT64 } input_arg { name: "buffer_size" description: "A scalar representing the number of bytes to buffer. Must be > 0." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } summary: "Creates a dataset that emits the records from one or more binary files." is_stateful: true } op { name: "FixedLengthRecordReader" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "header_bytes" type: "int" default_value { i: 0 } description: "Number of bytes in the header, defaults to 0." } attr { name: "record_bytes" type: "int" description: "Number of bytes in the record." } attr { name: "footer_bytes" type: "int" default_value { i: 0 } description: "Number of bytes in the footer, defaults to 0." } attr { name: "hop_bytes" type: "int" default_value { i: 0 } description: "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } summary: "A Reader that outputs fixed-length records from a file." is_stateful: true } op { name: "FixedLengthRecordReaderV2" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "header_bytes" type: "int" default_value { i: 0 } description: "Number of bytes in the header, defaults to 0." } attr { name: "record_bytes" type: "int" description: "Number of bytes in the record." } attr { name: "footer_bytes" type: "int" default_value { i: 0 } description: "Number of bytes in the footer, defaults to 0." } attr { name: "hop_bytes" type: "int" default_value { i: 0 } description: "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } attr { name: "encoding" type: "string" default_value { s: "" } description: "The type of encoding for the file. Currently ZLIB and GZIP\nare supported. Defaults to none." } summary: "A Reader that outputs fixed-length records from a file." is_stateful: true } op { name: "FixedUnigramCandidateSampler" input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." 
type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "vocab_file" type: "string" default_value { s: "" } description: "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op." } attr { name: "distortion" type: "float" default_value { f: 1 } description: "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion\'s power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution." } attr { name: "num_reserved_ids" type: "int" default_value { i: 0 } description: "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0." } attr { name: "num_shards" type: "int" default_value { i: 1 } description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'shard\') indicates the number of partitions that are being\nused in the overall computation." has_minimum: true minimum: 1 } attr { name: "shard" type: "int" default_value { i: 0 } description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'num_shards\') indicates the particular partition number of a\nsampler op, when partitioning is being used." has_minimum: true } attr { name: "unigrams" type: "list(float)" default_value { list { } } description: "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } summary: "Generates labels for candidate sampling with a learned unigram distribution." 
description: "A unigram sampler could use a fixed unigram distribution read from a\nfile or passed in as an in-memory array instead of building up the distribution\nfrom data on the fly. There is also an option to skew the distribution by\napplying a distortion power to the weights.\n\nThe vocabulary file should be in CSV-like format, with the last field\nbeing the weight associated with the word.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels." is_stateful: true } op { name: "FlatMapDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" type_list_attr: "Targuments" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" description: "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`." description: "Unlike MapDataset, the `f` in FlatMapDataset is expected to return a\nDataset variant, and FlatMapDataset will flatten successive results\ninto a single Dataset." } op { name: "Floor" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns element-wise largest integer not greater than x." } op { name: "FloorDiv" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns x // y element-wise." description: "*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "FloorMod" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns element-wise remainder of division. When `x < 0` xor `y < 0` is" description: "true, this follows Python semantics in that the result here is consistent\nwith a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.\n\n*NOTE*: `FloorMod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "FractionalAvgPool" input_arg { name: "value" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "output tensor after fractional avg pooling." type_attr: "T" } output_arg { name: "row_pooling_sequence" description: "row pooling sequence, needed to calculate gradient." 
type: DT_INT64 } output_arg { name: "col_pooling_sequence" description: "column pooling sequence, needed to calculate gradient." type: DT_INT64 } attr { name: "pooling_ratio" type: "list(float)" description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively." has_minimum: true minimum: 4 } attr { name: "pseudo_random" type: "bool" default_value { b: false } description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random." } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling." } attr { name: "deterministic" type: "bool" default_value { b: false } description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Performs fractional average pooling on the input." description: "Fractional average pooling is similar to Fractional max pooling in the pooling\nregion generation step. The only difference is that after pooling regions are\ngenerated, a mean operation is performed instead of a max operation in each\npooling region." } op { name: "FractionalAvgPoolGrad" input_arg { name: "orig_input_tensor_shape" description: "Original input tensor shape for `fractional_avg_pool`" type: DT_INT64 } input_arg { name: "out_backprop" description: "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_avg_pool`." type_attr: "T" } input_arg { name: "row_pooling_sequence" description: "row pooling sequence, form pooling region with\ncol_pooling_sequence." type: DT_INT64 } input_arg { name: "col_pooling_sequence" description: "column pooling sequence, form pooling region with\nrow_pooling sequence." type: DT_INT64 } output_arg { name: "output" description: "4-D. Gradients w.r.t. the input of `fractional_avg_pool`." type_attr: "T" } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling." 
} attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Computes gradient of the FractionalAvgPool function." description: "Unlike FractionalMaxPoolGrad, we don\'t need to find arg_max for\nFractionalAvgPoolGrad, we just need to evenly back-propagate each element of\nout_backprop to those indices that form the same pooling cell. Therefore, we\njust need to know the shape of original input tensor, instead of the whole\ntensor." } op { name: "FractionalMaxPool" input_arg { name: "value" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "output tensor after fractional max pooling." type_attr: "T" } output_arg { name: "row_pooling_sequence" description: "row pooling sequence, needed to calculate gradient." type: DT_INT64 } output_arg { name: "col_pooling_sequence" description: "column pooling sequence, needed to calculate gradient." type: DT_INT64 } attr { name: "pooling_ratio" type: "list(float)" description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively." has_minimum: true minimum: 4 } attr { name: "pseudo_random" type: "bool" default_value { b: false } description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random." } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling." } attr { name: "deterministic" type: "bool" default_value { b: false } description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Performs fractional max pooling on the input." description: "Fractional max pooling is slightly different than regular max pooling. In\nregular max pooling, you downsize an input set by taking the maximum value of\nsmaller N x N subsections of the set (often 2x2), and try to reduce the set by\na factor of N, where N is an integer. 
Fractional max pooling, as you might\nexpect from the word \"fractional\", means that the overall reduction ratio N\ndoes not have to be an integer.\n\nThe sizes of the pooling regions are generated randomly but are fairly uniform.\nFor example, let\'s look at the height dimension, and the constraints on the\nlist of rows that will be pool boundaries.\n\nFirst we define the following:\n\n1. input_row_length : the number of rows from the input set\n2. output_row_length : which will be smaller than the input\n3. alpha = input_row_length / output_row_length : our reduction ratio\n4. K = floor(alpha)\n5. row_pooling_sequence : this is the result list of pool boundary rows\n\nThen, row_pooling_sequence should satisfy:\n\n1. a[0] = 0 : the first value of the sequence is 0\n2. a[end] = input_row_length : the last value of the sequence is the size\n3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size\n4. length(row_pooling_sequence) = output_row_length+1\n\nFor more details on fractional max pooling, see this paper:\n[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)" } op { name: "FractionalMaxPoolGrad" input_arg { name: "orig_input" description: "Original input for `fractional_max_pool`" type_attr: "T" } input_arg { name: "orig_output" description: "Original output for `fractional_max_pool`" type_attr: "T" } input_arg { name: "out_backprop" description: "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_max_pool`." type_attr: "T" } input_arg { name: "row_pooling_sequence" description: "row pooling sequence, form pooling region with\ncol_pooling_sequence." type: DT_INT64 } input_arg { name: "col_pooling_sequence" description: "column pooling sequence, form pooling region with\nrow_pooling sequence." type: DT_INT64 } output_arg { name: "output" description: "4-D. Gradients w.r.t. the input of `fractional_max_pool`." type_attr: "T" } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Computes gradient of the FractionalMaxPool function." } op { name: "FusedBatchNorm" input_arg { name: "x" description: "A 4D Tensor for input data." type_attr: "T" } input_arg { name: "scale" description: "A 1D Tensor for scaling factor, to scale the normalized x." type_attr: "T" } input_arg { name: "offset" description: "A 1D Tensor for offset, to shift to the normalized x." type_attr: "T" } input_arg { name: "mean" description: "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training." type_attr: "T" } input_arg { name: "variance" description: "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training." type_attr: "T" } output_arg { name: "y" description: "A 4D Tensor for output data." type_attr: "T" } output_arg { name: "batch_mean" description: "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean." 
type_attr: "T" } output_arg { name: "batch_variance" description: "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance." type_attr: "T" } output_arg { name: "reserve_space_1" description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation." type_attr: "T" } output_arg { name: "reserve_space_2" description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation." type_attr: "T" } attr { name: "T" type: "type" description: "The data type for the elements of input and output Tensors." allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 0.0001 } description: "A small float number added to the variance of x." } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\"." } attr { name: "is_training" type: "bool" default_value { b: true } description: "A bool value to indicate the operation is for training (default)\nor inference." } summary: "Batch normalization." description: "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors." } op { name: "FusedBatchNormGrad" input_arg { name: "y_backprop" description: "A 4D Tensor for the gradient with respect to y." type_attr: "T" } input_arg { name: "x" description: "A 4D Tensor for input data." type_attr: "T" } input_arg { name: "scale" description: "A 1D Tensor for scaling factor, to scale the normalized x." type_attr: "T" } input_arg { name: "reserve_space_1" description: "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation." type_attr: "T" } input_arg { name: "reserve_space_2" description: "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation." type_attr: "T" } output_arg { name: "x_backprop" description: "A 4D Tensor for the gradient with respect to x." type_attr: "T" } output_arg { name: "scale_backprop" description: "A 1D Tensor for the gradient with respect to scale." type_attr: "T" } output_arg { name: "offset_backprop" description: "A 1D Tensor for the gradient with respect to offset." type_attr: "T" } output_arg { name: "reserve_space_3" description: "Unused placeholder to match the mean input in FusedBatchNorm." type_attr: "T" } output_arg { name: "reserve_space_4" description: "Unused placeholder to match the variance input\nin FusedBatchNorm." type_attr: "T" } attr { name: "T" type: "type" description: "The data type for the elements of input and output Tensors." allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 0.0001 } description: "A small float number added to the variance of x." } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\"." } attr { name: "is_training" type: "bool" default_value { b: true } description: "A bool value to indicate the operation is for training (default)\nor inference." 
} summary: "Gradient for batch normalization." description: "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors." } op { name: "FusedBatchNormGradV2" input_arg { name: "y_backprop" description: "A 4D Tensor for the gradient with respect to y." type_attr: "T" } input_arg { name: "x" description: "A 4D Tensor for input data." type_attr: "T" } input_arg { name: "scale" description: "A 1D Tensor for scaling factor, to scale the normalized x." type: DT_FLOAT } input_arg { name: "reserve_space_1" description: "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation." type_attr: "U" } input_arg { name: "reserve_space_2" description: "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation." type_attr: "U" } output_arg { name: "x_backprop" description: "A 4D Tensor for the gradient with respect to x." type_attr: "T" } output_arg { name: "scale_backprop" description: "A 1D Tensor for the gradient with respect to scale." type_attr: "U" } output_arg { name: "offset_backprop" description: "A 1D Tensor for the gradient with respect to offset." type_attr: "U" } output_arg { name: "reserve_space_3" description: "Unused placeholder to match the mean input in FusedBatchNorm." type_attr: "U" } output_arg { name: "reserve_space_4" description: "Unused placeholder to match the variance input\nin FusedBatchNorm." type_attr: "U" } attr { name: "T" type: "type" description: "The data type for the elements of input and output Tensors." allowed_values { list { type: DT_HALF type: DT_FLOAT } } } attr { name: "U" type: "type" description: "The data type for the scale, offset, mean, and variance." allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 0.0001 } description: "A small float number added to the variance of x." } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\"." } attr { name: "is_training" type: "bool" default_value { b: true } description: "A bool value to indicate the operation is for training (default)\nor inference." } summary: "Gradient for batch normalization." description: "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors." } op { name: "FusedBatchNormV2" input_arg { name: "x" description: "A 4D Tensor for input data." type_attr: "T" } input_arg { name: "scale" description: "A 1D Tensor for scaling factor, to scale the normalized x." type_attr: "U" } input_arg { name: "offset" description: "A 1D Tensor for offset, to shift to the normalized x." type_attr: "U" } input_arg { name: "mean" description: "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training." type_attr: "U" } input_arg { name: "variance" description: "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training." type_attr: "U" } output_arg { name: "y" description: "A 4D Tensor for output data." 
type_attr: "T" } output_arg { name: "batch_mean" description: "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean." type_attr: "U" } output_arg { name: "batch_variance" description: "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance." type_attr: "U" } output_arg { name: "reserve_space_1" description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation." type_attr: "U" } output_arg { name: "reserve_space_2" description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation." type_attr: "U" } attr { name: "T" type: "type" description: "The data type for the elements of input and output Tensors." allowed_values { list { type: DT_HALF type: DT_FLOAT } } } attr { name: "U" type: "type" description: "The data type for the scale, offset, mean, and variance." allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 0.0001 } description: "A small float number added to the variance of x." } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\"." } attr { name: "is_training" type: "bool" default_value { b: true } description: "A bool value to indicate the operation is for training (default)\nor inference." } summary: "Batch normalization." description: "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors." } op { name: "FusedPadConv2D" input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, in_channels]`." type_attr: "T" } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type: DT_INT32 } input_arg { name: "filter" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } summary: "Performs a padding as a preprocess during a convolution." description: "Similar to FusedResizeAndPadConv2d, this op allows for an optimized\nimplementation where the spatial padding transformation stage is fused with the\nim2col lookup, but in this case without the bilinear filtering required for\nresizing. Fusing the padding prevents the need to write out the intermediate\nresults as whole tensors, reducing memory pressure, and we can get some latency\ngains by merging the transformation calculations.\nThe data_format attribute for Conv2D isn\'t supported by this op, and \'NHWC\'\norder is used instead.\nInternally this op uses a single per-graph scratch buffer, which means that it\nwill block if multiple versions are being run in parallel. This is because this\noperator is primarily an optimization to minimize memory usage." 
} op { name: "FusedResizeAndPadConv2D" input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, in_channels]`." type_attr: "T" } input_arg { name: "size" description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type: DT_INT32 } input_arg { name: "filter" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "resize_align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1),\nwhich exactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } summary: "Performs a resize and padding as a preprocess during a convolution." description: "It\'s often possible to do spatial transformations more efficiently as part of\nthe packing stage of a convolution, so this op allows for an optimized\nimplementation where these stages are fused together. This prevents the need to\nwrite out the intermediate results as whole tensors, reducing memory pressure,\nand we can get some latency gains by merging the transformation calculations.\nThe data_format attribute for Conv2D isn\'t supported by this op, and defaults to\n\'NHWC\' order.\nInternally this op uses a single per-graph scratch buffer, which means that it\nwill block if multiple versions are being run in parallel. This is because this\noperator is primarily an optimization to minimize memory usage." } op { name: "Gather" input_arg { name: "params" type_attr: "Tparams" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "Tparams" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Gather slices from `params` according to `indices`." description: "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `indices.shape + params.shape[1:]` where:\n\n```python\n # Scalar indices\n output[:, ..., :] = params[indices, :, ... :]\n\n # Vector indices\n output[i, :, ..., :] = params[indices[i], :, ... :]\n\n # Higher rank indices\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\n```\n\nIf `indices` is a permutation and `len(indices) == params.shape[0]` then\nthis operation will permute `params` accordingly.\n\n`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in\n`indices` are always validated to be within range. If assigned to GPU,\nout-of-bound indices result in safe but unspecified behavior, which may include\nraising an error.\n\n
" } op { name: "GatherNd" input_arg { name: "params" description: "The tensor from which to gather values." type_attr: "Tparams" } input_arg { name: "indices" description: "Index tensor." type_attr: "Tindices" } output_arg { name: "output" description: "Values from `params` gathered from indices given by `indices`, with\nshape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`." type_attr: "Tparams" } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Gather slices from `params` into a Tensor with shape specified by `indices`." description: "`indices` is an K-dimensional integer tensor, best thought of as a\n(K-1)-dimensional tensor of indices into `params`, where each element defines a\nslice of `params`:\n\n output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]\n\nWhereas in @{tf.gather} `indices` defines slices into the first\ndimension of `params`, in `tf.gather_nd`, `indices` defines slices into the\nfirst `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\nThe last dimension of `indices` can be at most the rank of\n`params`:\n\n indices.shape[-1] <= params.rank\n\nThe last dimension of `indices` corresponds to elements\n(if `indices.shape[-1] == params.rank`) or slices\n(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`\nof `params`. The output tensor has shape\n\n indices.shape[:-1] + params.shape[indices.shape[-1]:]\n\nSome examples below.\n\nSimple indexing into a matrix:\n\n```python\n indices = [[0, 0], [1, 1]]\n params = [[\'a\', \'b\'], [\'c\', \'d\']]\n output = [\'a\', \'d\']\n```\n\nSlice indexing into a matrix:\n\n```python\n indices = [[1], [0]]\n params = [[\'a\', \'b\'], [\'c\', \'d\']]\n output = [[\'c\', \'d\'], [\'a\', \'b\']]\n```\n\nIndexing into a 3-tensor:\n\n```python\n indices = [[1]]\n params = [[[\'a0\', \'b0\'], [\'c0\', \'d0\']],\n [[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n output = [[[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n\n\n indices = [[0, 1], [1, 0]]\n params = [[[\'a0\', \'b0\'], [\'c0\', \'d0\']],\n [[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n output = [[\'c0\', \'d0\'], [\'a1\', \'b1\']]\n\n\n indices = [[0, 0, 1], [1, 0, 1]]\n params = [[[\'a0\', \'b0\'], [\'c0\', \'d0\']],\n [[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n output = [\'b0\', \'b1\']\n```\n\nBatched indexing into a matrix:\n\n```python\n indices = [[[0, 0]], [[0, 1]]]\n params = [[\'a\', \'b\'], [\'c\', \'d\']]\n output = [[\'a\'], [\'b\']]\n```\n\nBatched slice indexing into a matrix:\n\n```python\n indices = [[[1]], [[0]]]\n params = [[\'a\', \'b\'], [\'c\', \'d\']]\n output = [[[\'c\', \'d\']], [[\'a\', \'b\']]]\n```\n\nBatched indexing into a 3-tensor:\n\n```python\n indices = [[[1]], [[0]]]\n params = [[[\'a0\', \'b0\'], [\'c0\', \'d0\']],\n [[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n output = [[[[\'a1\', \'b1\'], [\'c1\', \'d1\']]],\n [[[\'a0\', \'b0\'], [\'c0\', \'d0\']]]]\n\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]\n params = [[[\'a0\', \'b0\'], [\'c0\', \'d0\']],\n [[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n output = [[[\'c0\', \'d0\'], [\'a1\', \'b1\']],\n [[\'a0\', \'b0\'], [\'c1\', \'d1\']]]\n\n\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]\n params = [[[\'a0\', \'b0\'], [\'c0\', \'d0\']],\n [[\'a1\', \'b1\'], [\'c1\', \'d1\']]]\n output = [[\'b0\', \'b1\'], [\'d0\', \'c1\']]\n```" } op { name: "GatherV2" input_arg { name: "params" description: "The tensor from which to gather values. Must be at least rank\n`axis + 1`." 
type_attr: "Tparams" } input_arg { name: "indices" description: "Index tensor. Must be in range `[0, params.shape[axis])`." type_attr: "Tindices" } input_arg { name: "axis" description: "The axis in `params` to gather `indices` from. Defaults to the first\ndimension. Supports negative indexes." type_attr: "Taxis" } output_arg { name: "output" description: "Values from `params` gathered from indices given by `indices`, with\nshape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`." type_attr: "Tparams" } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Taxis" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Gather slices from `params` axis `axis` according to `indices`." description: "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `params.shape[:axis] + indices.shape +\nparams.shape[axis + 1:]` where:\n\n```python\n # Scalar indices (output is rank(params) - 1).\n output[a_0, ..., a_n, b_0, ..., b_n] =\n params[a_0, ..., a_n, indices, b_0, ..., b_n]\n\n # Vector indices (output is rank(params)).\n output[a_0, ..., a_n, i, b_0, ..., b_n] =\n params[a_0, ..., a_n, indices[i], b_0, ..., b_n]\n\n # Higher rank indices (output is rank(params) + rank(indices) - 1).\n output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =\n params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]\n```\n\n
" } op { name: "GenerateVocabRemapping" input_arg { name: "new_vocab_file" description: "Path to the new vocab file." type: DT_STRING } input_arg { name: "old_vocab_file" description: "Path to the old vocab file." type: DT_STRING } output_arg { name: "remapping" description: "A Tensor of length num_new_vocab where the element at index i\nis equal to the old ID that maps to the new ID i. This element is -1 for any\nnew ID that is not found in the old vocabulary." type: DT_INT64 } output_arg { name: "num_present" description: "Number of new vocab entries found in old vocab." type: DT_INT32 } attr { name: "new_vocab_offset" type: "int" description: "How many entries into the new vocab file to start reading." has_minimum: true } attr { name: "num_new_vocab" type: "int" description: "Number of entries in the new vocab file to remap." has_minimum: true } attr { name: "old_vocab_size" type: "int" default_value { i: -1 } description: "Number of entries in the old vocab file to consider. If -1,\nuse the entire old vocabulary." has_minimum: true minimum: -1 } summary: "Given a path to new and old vocabulary files, returns a remapping Tensor of" description: "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. The old vocabulary is\nconstrained to the first `old_vocab_size` entries if `old_vocab_size` is not the\ndefault value of -1.\n\n`num_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. The format of the files should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable)." } op { name: "GetSessionHandle" input_arg { name: "value" description: "The tensor to be stored." type_attr: "T" } output_arg { name: "handle" description: "The handle for the tensor stored in the session state, represented\nas a string." type: DT_STRING } attr { name: "T" type: "type" } summary: "Store the input tensor in the state of the current session." } op { name: "GetSessionHandleV2" input_arg { name: "value" description: "The tensor to be stored." type_attr: "T" } output_arg { name: "handle" description: "The handle for the tensor stored in the session state, represented\nas a ResourceHandle object." 
type: DT_RESOURCE } attr { name: "T" type: "type" } summary: "Store the input tensor in the state of the current session." is_stateful: true } op { name: "GetSessionTensor" input_arg { name: "handle" description: "The handle for a tensor stored in the session state." type: DT_STRING } output_arg { name: "value" description: "The tensor for the given handle." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the output value." } summary: "Get the value of the tensor specified by its handle." } op { name: "Greater" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Returns the truth value of (x > y) element-wise." description: "*NOTE*: `Greater` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "GreaterEqual" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Returns the truth value of (x >= y) element-wise." description: "*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "GroupByWindowDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "key_func_other_arguments" type_list_attr: "Tkey_func_other_arguments" } input_arg { name: "reduce_func_other_arguments" type_list_attr: "Treduce_func_other_arguments" } input_arg { name: "window_size_func_other_arguments" type_list_attr: "Twindow_size_func_other_arguments" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "key_func" type: "func" description: "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64." } attr { name: "reduce_func" type: "func" } attr { name: "window_size_func" type: "func" } attr { name: "Tkey_func_other_arguments" type: "list(type)" has_minimum: true } attr { name: "Treduce_func_other_arguments" type: "list(type)" has_minimum: true } attr { name: "Twindow_size_func_other_arguments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that computes a windowed group-by on `input_dataset`." description: "// TODO(mrry): Support non-int64 keys." } op { name: "HSVToRGB" input_arg { name: "images" description: "1-D or higher rank. HSV data to convert. Last dimension must be size 3." type_attr: "T" } output_arg { name: "output" description: "`images` converted to RGB." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Convert one or more images from HSV to RGB." description: "Outputs a tensor of the same shape as the `images` tensor, containing the RGB\nvalue of the pixels. 
The output is only well defined if the value in `images`\nare in `[0,1]`.\n\nSee `rgb_to_hsv` for a description of the HSV encoding." } op { name: "HashTable" output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } description: "If true and shared_name is empty, the table is shared\nusing the node name." } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } summary: "Creates a non-initialized hash table." description: "This op creates a hash table, specifying the type of its keys and values.\nBefore using the table you will have to initialize it. After initialization the\ntable will be immutable." is_stateful: true } op { name: "HashTableV2" output_arg { name: "table_handle" description: "Handle to a table." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } description: "If true and shared_name is empty, the table is shared\nusing the node name." } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } summary: "Creates a non-initialized hash table." description: "This op creates a hash table, specifying the type of its keys and values.\nBefore using the table you will have to initialize it. After initialization the\ntable will be immutable." is_stateful: true } op { name: "HistogramFixedWidth" input_arg { name: "values" description: "Numeric `Tensor`." type_attr: "T" } input_arg { name: "value_range" description: "Shape [2] `Tensor` of same `dtype` as `values`.\nvalues <= value_range[0] will be mapped to hist[0],\nvalues >= value_range[1] will be mapped to hist[-1]." type_attr: "T" } input_arg { name: "nbins" description: "Scalar `int32 Tensor`. Number of histogram bins." type: DT_INT32 } output_arg { name: "out" description: "A 1-D `Tensor` holding histogram of values." type_attr: "dtype" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "dtype" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Return histogram of values." description: "Given the tensor `values`, this operation returns a rank 1 histogram counting\nthe number of entries in `values` that fall into every bin. 
The bins are\nequal width and determined by the arguments `value_range` and `nbins`.\n\n```python\n# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\nnbins = 5\nvalue_range = [0.0, 5.0]\nnew_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n\nwith tf.get_default_session() as sess:\n hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)\n variables.global_variables_initializer().run()\n sess.run(hist) => [2, 1, 1, 0, 2]\n```" } op { name: "HistogramSummary" input_arg { name: "tag" description: "Scalar. Tag to use for the `Summary.Value`." type: DT_STRING } input_arg { name: "values" description: "Any shape. Values to use to build the histogram." type_attr: "T" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Outputs a `Summary` protocol buffer with a histogram." description: "The generated\n[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\nhas one summary value containing a histogram for `values`.\n\nThis op reports an `InvalidArgument` error if any value is not finite." } op { name: "IFFT" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its inverse 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft\n@end_compatibility" type: DT_COMPLEX64 } summary: "Inverse fast Fourier transform." description: "Computes the inverse 1-dimensional discrete Fourier transform over the\ninner-most dimension of `input`." } op { name: "IFFT2D" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n dimensions of `input` are replaced with their inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft2\n@end_compatibility" type: DT_COMPLEX64 } summary: "Inverse 2D fast Fourier transform." description: "Computes the inverse 2-dimensional discrete Fourier transform over the\ninner-most 2 dimensions of `input`." } op { name: "IFFT3D" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their inverse 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifftn with 3 dimensions.\n@end_compatibility" type: DT_COMPLEX64 } summary: "Inverse 3D fast Fourier transform." description: "Computes the inverse 3-dimensional discrete Fourier transform over the\ninner-most 3 dimensions of `input`." } op { name: "IRFFT" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } input_arg { name: "fft_length" description: "An int32 tensor of shape [1]. The FFT length." type: DT_INT32 } output_arg { name: "output" description: "A float32 tensor of the same rank as `input`. 
The inner-most\n dimension of `input` is replaced with the `fft_length` samples of its inverse\n 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft\n@end_compatibility" type: DT_FLOAT } summary: "Inverse real-valued fast Fourier transform." description: "Computes the inverse 1-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most dimension of `input`.\n\nThe inner-most dimension of `input` is assumed to be the result of `RFFT`: the\n`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If\n`fft_length` is not provided, it is computed from the size of the inner-most\ndimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to\ncompute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller\nthan the corresponding dimension of `input`, the dimension is cropped. If it is\nlarger, the dimension is padded with zeros." } op { name: "IRFFT2D" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } input_arg { name: "fft_length" description: "An int32 tensor of shape [2]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A float32 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft2\n@end_compatibility" type: DT_FLOAT } summary: "Inverse 2D real-valued fast Fourier transform." description: "Computes the inverse 2-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most 2 dimensions of `input`.\n\nThe inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:\nThe inner-most dimension contains the `fft_length / 2 + 1` unique components of\nthe DFT of a real-valued signal. If `fft_length` is not provided, it is computed\nfrom the size of the inner-most 2 dimensions of `input`. If the FFT length used\nto compute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong each axis `IRFFT2D` is computed on, if `fft_length` (or\n`fft_length / 2 + 1` for the inner-most dimension) is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros." } op { name: "IRFFT3D" input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } input_arg { name: "fft_length" description: "An int32 tensor of shape [3]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A float32 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 3D real Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.irfftn with 3 dimensions.\n@end_compatibility" type: DT_FLOAT } summary: "Inverse 3D real-valued fast Fourier transform." description: "Computes the inverse 3-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most 3 dimensions of `input`.\n\nThe inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:\nThe inner-most dimension contains the `fft_length / 2 + 1` unique components of\nthe DFT of a real-valued signal. If `fft_length` is not provided, it is computed\nfrom the size of the inner-most 3 dimensions of `input`. 
If the FFT length used\nto compute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong each axis `IRFFT3D` is computed on, if `fft_length` (or\n`fft_length / 2 + 1` for the inner-most dimension) is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros." } op { name: "Identity" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } summary: "Return a tensor with the same shape and contents as the input tensor or value." } op { name: "IdentityN" input_arg { name: "input" type_list_attr: "T" } output_arg { name: "output" type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } summary: "Returns a list of tensors with the same shapes and contents as the input" description: "tensors.\n\nThis op can be used to override the gradient for complicated functions. For\nexample, suppose y = f(x) and we wish to apply a custom function g for backprop\nsuch that dx = g(dy). In Python,\n\n```python\nwith tf.get_default_graph().gradient_override_map(\n {\'IdentityN\': \'OverrideGradientWithG\'}):\n y, _ = identity_n([f(x), x])\n\n@tf.RegisterGradient(\'OverrideGradientWithG\')\ndef ApplyG(op, dy, _):\n return [None, g(dy)] # Do not backprop to f(x).\n```" } op { name: "IdentityReader" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } summary: "A Reader that outputs the queued work as both the key and value." description: "To use, enqueue strings in a Queue. ReaderRead will take the front\nwork string and output (work, work)." is_stateful: true } op { name: "IdentityReaderV2" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } summary: "A Reader that outputs the queued work as both the key and value." description: "To use, enqueue strings in a Queue. ReaderRead will take the front\nwork string and output (work, work)." is_stateful: true } op { name: "Igamma" input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Compute the lower regularized incomplete Gamma function `P(a, x)`." description: "The lower regularized incomplete Gamma function is defined as:\n\n\n\\\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\\\)\n\nwhere\n\n\\\\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\\\)\n\nis the lower incomplete Gamma function.\n\nNote, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete\nGamma function."
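# Igamma (above) and Igammac (below) are complements: P(a, x) + Q(a, x) = 1.
# A quick numeric check via the TF 1.x wrappers (illustrative):
#
#   import tensorflow as tf
#   a = tf.constant([0.5, 1.0, 2.0])
#   x = tf.constant([1.0, 2.0, 3.0])
#   total = tf.igamma(a, x) + tf.igammac(a, x)  # ~ [1.0, 1.0, 1.0]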
} op { name: "Igammac" input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Compute the upper regularized incomplete Gamma function `Q(a, x)`." description: "The upper regularized incomplete Gamma function is defined as:\n\n\\\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\\\)\n\nwhere\n\n\\\\(Gamma(a, x) = int_{x}^{\\infty} t^{a-1} exp(-t) dt\\\\)\n\nis the upper incomplete Gama function.\n\nNote, above `P(a, x)` (`Igamma`) is the lower regularized complete\nGamma function." } op { name: "IgnoreErrorsDataset" input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that contains the elements of `input_dataset` ignoring errors." } op { name: "Imag" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns the imaginary part of a complex number." description: "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the imaginary part of each element in `input`. All\nelements in `input` must be complex numbers of the form \\\\(a + bj\\\\), where *a*\nis the real part and *b* is the imaginary part returned by this operation.\n\nFor example:\n\n```\n# tensor \'input\' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.imag(input) ==> [4.75, 5.75]\n```" } op { name: "ImageSummary" input_arg { name: "tag" description: "Scalar. Used to build the `tag` attribute of the summary values." type: DT_STRING } input_arg { name: "tensor" description: "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4." type_attr: "T" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "max_images" type: "int" default_value { i: 3 } description: "Max number of batch elements to generate images for." has_minimum: true minimum: 1 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "bad_color" type: "tensor" default_value { tensor { dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255 } } description: "Color to use for pixels with non-finite values." } summary: "Outputs a `Summary` protocol buffer with images." description: "The summary has up to `max_images` summary values containing images. The\nimages are built from `tensor` which must be 4-D with shape `[batch_size,\nheight, width, channels]` and where `channels` can be:\n\n* 1: `tensor` is interpreted as Grayscale.\n* 3: `tensor` is interpreted as RGB.\n* 4: `tensor` is interpreted as RGBA.\n\nThe images have the same number of channels as the input tensor. For float\ninput, the values are normalized one image at a time to fit in the range\n`[0, 255]`. `uint8` values are unchanged. 
The op uses two different\nnormalization algorithms:\n\n* If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n* If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. It is used to\nbuild the `tag` of the summary values:\n\n* If `max_images` is 1, the summary value tag is \'*tag*/image\'.\n* If `max_images` is greater than 1, the summary value tags are\n generated sequentially as \'*tag*/image/0\', \'*tag*/image/1\', etc.\n\nThe `bad_color` argument is the color to use in the generated images for\nnon-finite input values. It is a `uint8` 1-D tensor of length `channels`.\nEach element must be in the range `[0, 255]` (it represents the value of a\npixel in the output image). Non-finite values in the input tensor are\nreplaced by this tensor in the output image. The default value is the color\nred." } op { name: "ImmutableConst" output_arg { name: "tensor" type_attr: "dtype" } attr { name: "dtype" type: "type" description: "Type of the returned tensor." } attr { name: "shape" type: "shape" description: "Shape of the returned tensor." } attr { name: "memory_region_name" type: "string" description: "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env." } summary: "Returns immutable tensor from memory region." description: "The current implementation memmaps the tensor from a file." } op { name: "InTopK" input_arg { name: "predictions" description: "A `batch_size` x `classes` tensor." type: DT_FLOAT } input_arg { name: "targets" description: "A `batch_size` vector of class ids." type_attr: "T" } output_arg { name: "precision" description: "Computed Precision at `k` as a `bool Tensor`." type: DT_BOOL } attr { name: "k" type: "int" description: "Number of top elements to look at for computing precision." } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Says whether the targets are in the top `K` predictions." description: "This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\nprediction for the target class is among the top `k` predictions among\nall predictions for example `i`. Note that the behavior of `InTopK` differs\nfrom the `TopK` op in its handling of ties; if multiple classes have the\nsame prediction value and straddle the top-`k` boundary, all of those\nclasses are considered to be in the top `k`.\n\nMore formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n$$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$" } op { name: "InTopKV2" input_arg { name: "predictions" description: "A `batch_size` x `classes` tensor." type: DT_FLOAT } input_arg { name: "targets" description: "A `batch_size` vector of class ids." type_attr: "T" } input_arg { name: "k" description: "Number of top elements to look at for computing precision." type_attr: "T" } output_arg { name: "precision" description: "Computed precision at `k` as a `bool Tensor`." type: DT_BOOL } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Says whether the targets are in the top `K` predictions."
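# InTopK (above) fixes `k` as an attr, while the InTopKV2 op defined here takes
# `k` as a tensor input. A minimal sketch via the TF 1.x wrapper
# tf.nn.in_top_k (illustrative):
#
#   import tensorflow as tf
#   predictions = tf.constant([[0.1, 0.8, 0.1],
#                              [0.3, 0.3, 0.4]])
#   targets = tf.constant([1, 0])
#   hits = tf.nn.in_top_k(predictions, targets, k=1)  # => [True, False]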
description: "This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\nprediction for the target class is among the top `k` predictions among\nall predictions for example `i`. Note that the behavior of `InTopK` differs\nfrom the `TopK` op in its handling of ties; if multiple classes have the\nsame prediction value and straddle the top-`k` boundary, all of those\nclasses are considered to be in the top `k`.\n\nMore formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n$$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$" } op { name: "InitializeTable" input_arg { name: "table_handle" description: "Handle to a table which will be initialized." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Keys of type Tkey." type_attr: "Tkey" } input_arg { name: "values" description: "Values of type Tval." type_attr: "Tval" } attr { name: "Tkey" type: "type" } attr { name: "Tval" type: "type" } summary: "Table initializer that takes two tensors for keys and values respectively." } op { name: "InitializeTableFromTextFile" input_arg { name: "table_handle" description: "Handle to a table which will be initialized." type: DT_STRING is_ref: true } input_arg { name: "filename" description: "Filename of a vocabulary text file." type: DT_STRING } attr { name: "key_index" type: "int" description: "Column index in a line to get the table `key` values from." has_minimum: true minimum: -2 } attr { name: "value_index" type: "int" description: "Column index that represents information of a line to get the table\n`value` values from." has_minimum: true minimum: -2 } attr { name: "vocab_size" type: "int" default_value { i: -1 } description: "Number of elements of the file, use -1 if unknown." has_minimum: true minimum: -1 } attr { name: "delimiter" type: "string" default_value { s: "\t" } description: "Delimiter to separate fields in a line." } summary: "Initializes a table from a text file." description: "It inserts one key-value pair into the table for each line of the file.\nThe key and value is extracted from the whole line content, elements from the\nsplit line based on `delimiter` or the line number (starting from zero).\nWhere to extract the key and value from a line is specified by `key_index` and\n`value_index`.\n\n- A value of -1 means use the line number(starting from zero), expects `int64`.\n- A value of -2 means use the whole line content, expects `string`.\n- A value >= 0 means use the index (starting at zero) of the split line based\n on `delimiter`." } op { name: "InitializeTableFromTextFileV2" input_arg { name: "table_handle" description: "Handle to a table which will be initialized." type: DT_RESOURCE } input_arg { name: "filename" description: "Filename of a vocabulary text file." type: DT_STRING } attr { name: "key_index" type: "int" description: "Column index in a line to get the table `key` values from." has_minimum: true minimum: -2 } attr { name: "value_index" type: "int" description: "Column index that represents information of a line to get the table\n`value` values from." has_minimum: true minimum: -2 } attr { name: "vocab_size" type: "int" default_value { i: -1 } description: "Number of elements of the file, use -1 if unknown." has_minimum: true minimum: -1 } attr { name: "delimiter" type: "string" default_value { s: "\t" } description: "Delimiter to separate fields in a line." 
} summary: "Initializes a table from a text file." description: "It inserts one key-value pair into the table for each line of the file.\nThe key and value is extracted from the whole line content, elements from the\nsplit line based on `delimiter` or the line number (starting from zero).\nWhere to extract the key and value from a line is specified by `key_index` and\n`value_index`.\n\n- A value of -1 means use the line number(starting from zero), expects `int64`.\n- A value of -2 means use the whole line content, expects `string`.\n- A value >= 0 means use the index (starting at zero) of the split line based\n on `delimiter`." is_stateful: true } op { name: "InitializeTableV2" input_arg { name: "table_handle" description: "Handle to a table which will be initialized." type: DT_RESOURCE } input_arg { name: "keys" description: "Keys of type Tkey." type_attr: "Tkey" } input_arg { name: "values" description: "Values of type Tval." type_attr: "Tval" } attr { name: "Tkey" type: "type" } attr { name: "Tval" type: "type" } summary: "Table initializer that takes two tensors for keys and values respectively." is_stateful: true } op { name: "InterleaveDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" type_list_attr: "Targuments" } input_arg { name: "cycle_length" type: DT_INT64 } input_arg { name: "block_length" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" description: "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`." description: "Unlike MapDataset, the `f` in InterleaveDataset is expected to return\na Dataset variant, and InterleaveDataset will flatten successive\nresults into a single Dataset. Unlike FlatMapDataset,\nInterleaveDataset will interleave sequences of up to `block_length`\nconsecutive elements from `cycle_length` input elements." } op { name: "Inv" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the reciprocal of x element-wise." description: "I.e., \\\\(y = 1 / x\\\\)." deprecation { version: 17 explanation: "Use Reciprocal" } } op { name: "InvGrad" input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the gradient for the inverse of `x` wrt its input." description: "Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`\nis the corresponding input gradient." 
deprecation { version: 17 explanation: "Use ReciprocalGrad" } } op { name: "Invert" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } summary: "Flips all bits elementwise." description: "The result will have exactly those bits set, that are not set in `x`. The\ncomputation is performed on the underlying representation of x." } op { name: "InvertPermutation" input_arg { name: "x" description: "1-D." type_attr: "T" } output_arg { name: "y" description: "1-D." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the inverse permutation of a tensor." description: "This operation computes the inverse of an index permutation. It takes a 1-D\ninteger tensor `x`, which represents the indices of a zero-based array, and\nswaps each value with its index position. In other words, for an output tensor\n`y` and an input tensor `x`, this operation computes the following:\n\n`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`\n\nThe values must include 0. There can be no duplicate values or negative values.\n\nFor example:\n\n```\n# tensor `x` is [3, 4, 0, 2, 1]\ninvert_permutation(x) ==> [2, 4, 3, 0, 1]\n```" } op { name: "IsFinite" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns which elements of x are finite." description: "@compatibility(numpy)\nEquivalent to np.isfinite\n@end_compatibility" } op { name: "IsInf" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns which elements of x are Inf." description: "@compatibility(numpy)\nEquivalent to np.isinf\n@end_compatibility" } op { name: "IsNan" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns which elements of x are NaN." description: "@compatibility(numpy)\nEquivalent to np.isnan\n@end_compatibility" } op { name: "IsVariableInitialized" input_arg { name: "ref" description: "Should be from a `Variable` node. May be uninitialized." type_attr: "dtype" is_ref: true } output_arg { name: "is_initialized" type: DT_BOOL } attr { name: "dtype" type: "type" description: "The type of elements in the variable tensor." } summary: "Checks whether a tensor has been initialized." description: "Outputs boolean scalar indicating whether the tensor has been initialized." allows_uninitialized_input: true } op { name: "Iterator" output_arg { name: "handle" description: "A handle to the iterator that can be passed to a \"MakeIterator\"\nor \"IteratorGetNext\" op." type: DT_RESOURCE } attr { name: "shared_name" type: "string" } attr { name: "container" type: "string" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "A container for an iterator resource." is_stateful: true } op { name: "IteratorFromStringHandle" input_arg { name: "string_handle" description: "A string representation of the given handle." 
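# IteratorFromStringHandle (defined here) underpins the "feedable iterator"
# pattern of the TF 1.x tf.data API, together with IteratorToStringHandle and
# IteratorGetNext below. A minimal sketch (illustrative):
#
#   import tensorflow as tf
#   ds = tf.data.Dataset.range(10)
#   it = ds.make_one_shot_iterator()
#   handle_ph = tf.placeholder(tf.string, shape=[])
#   generic = tf.data.Iterator.from_string_handle(
#       handle_ph, ds.output_types, ds.output_shapes)
#   nxt = generic.get_next()
#   with tf.Session() as sess:
#       h = sess.run(it.string_handle())
#       print(sess.run(nxt, feed_dict={handle_ph: h}))  # => 0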
type: DT_STRING } output_arg { name: "resource_handle" description: "A handle to an iterator resource." type: DT_RESOURCE } attr { name: "output_types" type: "list(type)" default_value { list { } } description: "If specified, defines the type of each tuple component in an\nelement produced by the resulting iterator." has_minimum: true } attr { name: "output_shapes" type: "list(shape)" default_value { list { } } description: "If specified, defines the shape of each tuple component in an\nelement produced by the resulting iterator." has_minimum: true } summary: "Converts the given string representing a handle to an iterator to a resource." is_stateful: true } op { name: "IteratorGetNext" input_arg { name: "iterator" type: DT_RESOURCE } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Gets the next output from the given iterator." is_stateful: true } op { name: "IteratorToStringHandle" input_arg { name: "resource_handle" description: "A handle to an iterator resource." type: DT_RESOURCE } output_arg { name: "string_handle" description: "A string representation of the given handle." type: DT_STRING } summary: "Converts the given `resource_handle` representing an iterator to a string." is_stateful: true } op { name: "L2Loss" input_arg { name: "t" description: "Typically 2-D, but may have any dimensions." type_attr: "T" } output_arg { name: "output" description: "0-D." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "L2 Loss." description: "Computes half the L2 norm of a tensor without the `sqrt`:\n\n output = sum(t ** 2) / 2" } op { name: "LMDBReader" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } summary: "A Reader that outputs the records from a LMDB file." is_stateful: true } op { name: "LRN" input_arg { name: "input" description: "4-D." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "depth_radius" type: "int" default_value { i: 5 } description: "0-D. Half-width of the 1-D normalization window." } attr { name: "bias" type: "float" default_value { f: 1 } description: "An offset (usually positive to avoid dividing by 0)." } attr { name: "alpha" type: "float" default_value { f: 1 } description: "A scale factor, usually positive." } attr { name: "beta" type: "float" default_value { f: 0.5 } description: "An exponent." } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } summary: "Local Response Normalization." description: "The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last\ndimension), and each vector is normalized independently. Within a given vector,\neach component is divided by the weighted, squared sum of inputs within\n`depth_radius`. 
In detail,\n\n sqr_sum[a, b, c, d] =\n sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)\n output = input / (bias + alpha * sqr_sum) ** beta\n\nFor details, see [Krizhevsky et al., ImageNet classification with deep\nconvolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks)." } op { name: "LRNGrad" input_arg { name: "input_grads" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "input_image" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "output_image" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "The gradients for LRN." type_attr: "T" } attr { name: "depth_radius" type: "int" default_value { i: 5 } description: "A depth radius." } attr { name: "bias" type: "float" default_value { f: 1 } description: "An offset (usually > 0 to avoid dividing by 0)." } attr { name: "alpha" type: "float" default_value { f: 1 } description: "A scale factor, usually positive." } attr { name: "beta" type: "float" default_value { f: 0.5 } description: "An exponent." } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } summary: "Gradients for Local Response Normalization." } op { name: "LearnedUnigramCandidateSampler" input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } summary: "Generates labels for candidate sampling with a learned unigram distribution." 
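# The sampler defined below is exposed in the TF 1.x Python API as
# tf.nn.learned_unigram_candidate_sampler. A minimal sketch (illustrative):
#
#   import tensorflow as tf
#   true_classes = tf.constant([[1], [7]], dtype=tf.int64)  # batch_size x num_true
#   sampled, true_exp, sampled_exp = tf.nn.learned_unigram_candidate_sampler(
#       true_classes, num_true=1, num_sampled=5, unique=True, range_max=10)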
description: "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels." is_stateful: true } op { name: "LeftShift" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } summary: "Elementwise computes the bitwise left-shift of `x` and `y`." description: "If `y` is negative, or greater than or equal to the width of `x` in bits the\nresult is implementation defined." is_commutative: true } op { name: "Less" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Returns the truth value of (x < y) element-wise." description: "*NOTE*: `Less` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "LessEqual" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Returns the truth value of (x <= y) element-wise." description: "*NOTE*: `LessEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "Lgamma" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes the log of the absolute value of `Gamma(x)` element-wise." } op { name: "LinSpace" input_arg { name: "start" description: "First entry in the range." type_attr: "T" } input_arg { name: "stop" description: "Last entry in the range." type_attr: "T" } input_arg { name: "num" description: "Number of values to generate." type_attr: "Tidx" } output_arg { name: "output" description: "1-D. The generated values." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Generates values in an interval." description: "A sequence of `num` evenly-spaced values are generated beginning at `start`.\nIf `num > 1`, the values in the sequence increase by `stop - start / num - 1`,\nso that the last one is exactly `stop`.\n\nFor example:\n\n```\ntf.linspace(10.0, 12.0, 3, name=\"linspace\") => [ 10.0 11.0 12.0]\n```" } op { name: "ListDiff" input_arg { name: "x" description: "1-D. Values to keep." type_attr: "T" } input_arg { name: "y" description: "1-D. Values to remove." 
type_attr: "T" } output_arg { name: "out" description: "1-D. Values present in `x` but not in `y`." type_attr: "T" } output_arg { name: "idx" description: "1-D. Positions of `x` values preserved in `out`." type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the difference between two lists of numbers or strings." description: "Given a list `x` and a list `y`, this operation returns a list `out` that\nrepresents all values that are in `x` but not in `y`. The returned list `out`\nis sorted in the same order that the numbers appear in `x` (duplicates are\npreserved). This operation also returns a list `idx` that represents the\nposition of each `out` element in `x`. In other words:\n\n`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`\n\nFor example, given this input:\n\n```\nx = [1, 2, 3, 4, 5, 6]\ny = [1, 3, 5]\n```\n\nThis operation would return:\n\n```\nout ==> [2, 4, 6]\nidx ==> [1, 3, 5]\n```" } op { name: "LoadAndRemapMatrix" input_arg { name: "ckpt_path" description: "Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from\nwhich the old matrix `Tensor` will be loaded." type: DT_STRING } input_arg { name: "old_tensor_name" description: "Name of the 2-D `Tensor` to load from checkpoint." type: DT_STRING } input_arg { name: "row_remapping" description: "An int `Tensor` of row remappings (generally created by\n`generate_vocab_remapping`). Even if no row remapping is needed, this must\nstill be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted\nindex-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`)." type: DT_INT64 } input_arg { name: "col_remapping" description: "An int `Tensor` of column remappings (generally created by\n`generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping\nis to be done (e.g. column ordering is the same)." type: DT_INT64 } input_arg { name: "initializing_values" description: "A float `Tensor` containing values to fill in for cells\nin the output matrix that are not loaded from the checkpoint. Length must be\nexactly the same as the number of missing / new cells." type: DT_FLOAT } output_arg { name: "output_matrix" description: "Output matrix containing existing values loaded from the\ncheckpoint, and with any missing values filled in from initializing_values." type: DT_FLOAT } attr { name: "num_rows" type: "int" description: "Number of rows (length of the 1st dimension) in the output matrix." has_minimum: true } attr { name: "num_cols" type: "int" description: "Number of columns (length of the 2nd dimension) in the output matrix." has_minimum: true minimum: 1 } attr { name: "max_rows_in_memory" type: "int" default_value { i: -1 } description: "The maximum number of rows to load from the checkpoint at\nonce. If less than or equal to 0, the entire matrix will be loaded into\nmemory. Setting this arg trades increased disk reads for lower memory usage." } summary: "Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint" description: "at `ckpt_path` and potentially reorders its rows and columns using the\nspecified remappings.\n\nMost users should use one of the wrapper initializers (such as\n`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this\nfunction directly.\n\nThe remappings are 1-D tensors with the following properties:\n\n* `row_remapping` must have exactly `num_rows` entries. 
Row `i` of the output\n matrix will be initialized from the row corresponding to index\n `row_remapping[i]` in the old `Tensor` from the checkpoint.\n* `col_remapping` must have either 0 entries (indicating that no column\n reordering is needed) or `num_cols` entries. If specified, column `j` of the\n output matrix will be initialized from the column corresponding to index\n `col_remapping[j]` in the old `Tensor` from the checkpoint.\n* A value of -1 in either of the remappings signifies a \"missing\" entry. In that\n case, values from the `initializing_values` tensor will be used to fill that\n missing row or column. If `row_remapping` has `r` missing entries and\n `col_remapping` has `c` missing entries, then the following condition must be\n true:\n\n`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`\n\nThe remapping tensors can be generated using the GenerateVocabRemapping op.\n\nAs an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],\ninitializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing\nthe value from row i, column j of the old tensor in the checkpoint, the output\nmatrix will look like the following:\n\n[[w(1, 0), w(1, 2), 0.5],\n [w(0, 0), w(0, 2), -0.5],\n [0.25, -0.25, 42]]" is_stateful: true } op { name: "Log" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes natural logarithm of x element-wise." description: "I.e., \\\\(y = \\log_e x\\\\)." } op { name: "Log1p" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes natural logarithm of (1 + x) element-wise." description: "I.e., \\\\(y = \\log_e (1 + x)\\\\)." } op { name: "LogMatrixDeterminant" input_arg { name: "input" description: "Shape is `[N, M, M]`." type_attr: "T" } output_arg { name: "sign" description: "The signs of the determinants of the inputs. Shape is `[N]`." type_attr: "T" } output_arg { name: "log_abs_determinant" description: "The logs of the absolute values of the determinants\nof the N input matrices. Shape is `[N]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the sign and the log of the absolute value of the determinant of" description: "one or more square matrices.\n\nThe input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions\nform square matrices. The outputs are two tensors containing the signs and\nthe logs of the absolute values of the determinants for all N input submatrices\n`[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).\nThe log_abs_determinant is computed as sum(log(abs(diag(LU)))) and the sign\nas det(P)*prod(sign(diag(LU))), where LU is the LU decomposition of the\ninput and P is the corresponding permutation matrix." } op { name: "LogSoftmax" input_arg { name: "logits" description: "2-D with shape `[batch_size, num_classes]`." type_attr: "T" } output_arg { name: "logsoftmax" description: "Same shape as `logits`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes log softmax activations."
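# Illustrative sketch (comment only): the LogSoftmax formula given in the
# description below can be reproduced per row with NumPy. Subtracting the
# row maximum first is the standard numerically stable variant and leaves
# the result unchanged.
#
#   import numpy as np
#
#   def log_softmax(logits):
#       shifted = logits - logits.max(axis=1, keepdims=True)
#       return shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
#
#   np.exp(log_softmax(np.array([[1.0, 2.0, 3.0]]))).sum()  # => 1.0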
description: "For each batch `i` and class `j` we have\n\n logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))" } op { name: "LogUniformCandidateSampler" input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } summary: "Generates labels for candidate sampling with a log-uniform distribution." description: "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels." is_stateful: true } op { name: "LogicalAnd" input_arg { name: "x" type: DT_BOOL } input_arg { name: "y" type: DT_BOOL } output_arg { name: "z" type: DT_BOOL } summary: "Returns the truth value of x AND y element-wise." description: "*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "LogicalNot" input_arg { name: "x" type: DT_BOOL } output_arg { name: "y" type: DT_BOOL } summary: "Returns the truth value of NOT x element-wise." } op { name: "LogicalOr" input_arg { name: "x" type: DT_BOOL } input_arg { name: "y" type: DT_BOOL } output_arg { name: "z" type: DT_BOOL } summary: "Returns the truth value of x OR y element-wise." description: "*NOTE*: `LogicalOr` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "LookupTableExport" input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } output_arg { name: "keys" description: "Vector of all keys present in the table." type_attr: "Tkeys" } output_arg { name: "values" description: "Tensor of all values in the table. Indexed in parallel with `keys`." type_attr: "Tvalues" } attr { name: "Tkeys" type: "type" } attr { name: "Tvalues" type: "type" } summary: "Outputs all keys and values in the table." } op { name: "LookupTableExportV2" input_arg { name: "table_handle" description: "Handle to the table." type: DT_RESOURCE } output_arg { name: "keys" description: "Vector of all keys present in the table." type_attr: "Tkeys" } output_arg { name: "values" description: "Tensor of all values in the table. Indexed in parallel with `keys`." type_attr: "Tvalues" } attr { name: "Tkeys" type: "type" } attr { name: "Tvalues" type: "type" } summary: "Outputs all keys and values in the table." is_stateful: true } op { name: "LookupTableFind" input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "default_value" type_attr: "Tout" } output_arg { name: "values" description: "Same shape as `keys`. Values found in the table, or `default_value`\nfor missing keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } summary: "Looks up keys in a table, outputs the corresponding values." description: "The tensor `keys` must be of the same type as the keys of the table.\nThe output `values` is of the type of the table values.\n\nThe scalar `default_value` is the value output for keys not present in the\ntable. It must also be of the same type as the table values." } op { name: "LookupTableFindV2" input_arg { name: "table_handle" description: "Handle to the table." type: DT_RESOURCE } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "default_value" type_attr: "Tout" } output_arg { name: "values" description: "Same shape as `keys`. Values found in the table, or `default_value`\nfor missing keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } summary: "Looks up keys in a table, outputs the corresponding values." description: "The tensor `keys` must be of the same type as the keys of the table.\nThe output `values` is of the type of the table values.\n\nThe scalar `default_value` is the value output for keys not present in the\ntable. It must also be of the same type as the table values." is_stateful: true } op { name: "LookupTableImport" input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "values" description: "Values to associate with keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } summary: "Replaces the contents of the table with the specified keys and values." description: "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values." } op { name: "LookupTableImportV2" input_arg { name: "table_handle" description: "Handle to the table."
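# Illustrative sketch (comment only): the LookupTableFind/LookupTableFindV2
# behavior above is an element-wise dictionary lookup with a scalar
# fallback; a plain-Python model:
#
#   def lookup_table_find(table, keys, default_value):
#       # Missing keys yield `default_value`, which must have the
#       # table's value type.
#       return [table.get(k, default_value) for k in keys]
#
#   lookup_table_find({"a": 1, "b": 2}, ["a", "c"], -1)  # => [1, -1]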
type: DT_RESOURCE } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "values" description: "Values to associate with keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } summary: "Replaces the contents of the table with the specified keys and values." description: "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values." is_stateful: true } op { name: "LookupTableInsert" input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "values" description: "Values to associate with keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } summary: "Updates the table to associate keys with values." description: "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values." } op { name: "LookupTableInsertV2" input_arg { name: "table_handle" description: "Handle to the table." type: DT_RESOURCE } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "values" description: "Values to associate with keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } summary: "Updates the table to associate keys with values." description: "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values." is_stateful: true } op { name: "LookupTableSize" input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } output_arg { name: "size" description: "Scalar that contains the number of elements in the table." type: DT_INT64 } summary: "Computes the number of elements in the given table." } op { name: "LookupTableSizeV2" input_arg { name: "table_handle" description: "Handle to the table." type: DT_RESOURCE } output_arg { name: "size" description: "Scalar that contains the number of elements in the table." type: DT_INT64 } summary: "Computes the number of elements in the given table." is_stateful: true } op { name: "LoopCond" input_arg { name: "input" description: "A boolean scalar, representing the branch predicate of the Switch op." type: DT_BOOL } output_arg { name: "output" description: "The same tensor as `input`." type: DT_BOOL } summary: "Forwards the input to the output." description: "This operator represents the loop termination condition used by the\n\"pivot\" switches of a loop." } op { name: "MakeIterator" input_arg { name: "dataset" type: DT_VARIANT } input_arg { name: "iterator" type: DT_RESOURCE } summary: "Makes a new iterator from the given `dataset` and stores it in `iterator`." description: "This operation may be executed multiple times. Each execution will reset the\niterator in `iterator` to the first element of `dataset`." is_stateful: true } op { name: "MapAndBatchDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" type_list_attr: "Targuments" } input_arg { name: "batch_size" description: "A scalar representing the number of elements to accumulate in a\nbatch. It determines the number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel."
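# Illustrative sketch (comment only): element-for-element, the fused
# MapAndBatchDataset defined here matches applying `map` and then `batch`
# separately; the fusion only overlaps the work. Assuming the tf.data
# Python API for the unfused reference pipeline:
#
#   import tensorflow as tf
#
#   ds = tf.data.Dataset.range(8)
#   ref = ds.map(lambda x: x * 2).batch(4)  # same elements, unfused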
type: DT_INT64 } input_arg { name: "num_parallel_batches" description: "A scalar representing the number of batches to create in\nparallel. Processing multiple batches in parallel benefits workloads prone to\nstragglers." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that applies `f` to the outputs of `input_dataset` and then" description: "batches `batch_size` of them.\n\nUnlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `batch_size * num_parallel_batches` copies of `f` in parallel." } op { name: "MapClear" attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes all elements in the underlying container." is_stateful: true } op { name: "MapDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" type_list_attr: "Targuments" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`." } op { name: "MapIncompleteSize" output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op returns the number of incomplete elements in the underlying container." is_stateful: true } op { name: "MapPeek" input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op peeks at the values at the specified key. If the" description: "underlying container does not contain this key\nthis op will block until it does." is_stateful: true } op { name: "MapSize" output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op returns the number of elements in the underlying container." 
is_stateful: true } op { name: "MapStage" input_arg { name: "key" description: "int64" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "values" description: "a list of tensors\ndtypes A list of data types that inserted values should adhere to." type_list_attr: "fake_dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } description: "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached." has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "fake_dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "It is necessary to match this name to the matching Unstage Op." } summary: "Stage (key, values) in the underlying container which behaves like a hashtable." is_stateful: true } op { name: "MapUnstage" input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes and returns the values associated with the key" description: "from the underlying container. If the underlying container\ndoes not contain this key, the op will block until it does." is_stateful: true } op { name: "MapUnstageNoKey" input_arg { name: "indices" type: DT_INT32 } output_arg { name: "key" type: DT_INT64 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes and returns a random (key, value)" description: "from the underlying container. If the underlying container\ndoes not contain elements, the op will block until it does." is_stateful: true } op { name: "MatMul" input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "transpose_a" type: "bool" default_value { b: false } description: "If true, \"a\" is transposed before multiplication." } attr { name: "transpose_b" type: "bool" default_value { b: false } description: "If true, \"b\" is transposed before multiplication." } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Multiply the matrix \"a\" by the matrix \"b\"." 
description: "The inputs must be two-dimensional matrices and the inner dimension of\n\"a\" (after being transposed if transpose_a is true) must match the\nouter dimension of \"b\" (after being transposed if transposed_b is\ntrue).\n\n*Note*: The default kernel implementation for MatMul on GPUs uses\ncublas." } op { name: "MatchingFiles" input_arg { name: "pattern" description: "Shell wildcard pattern(s). Scalar or vector of type string." type: DT_STRING } output_arg { name: "filenames" description: "A vector of matching filenames." type: DT_STRING } summary: "Returns the set of files matching one or more glob patterns." description: "Note that this routine only supports wildcard characters in the\nbasename portion of the pattern, not in the directory portion." } op { name: "MatrixBandPart" input_arg { name: "input" description: "Rank `k` tensor." type_attr: "T" } input_arg { name: "num_lower" description: "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle." type: DT_INT64 } input_arg { name: "num_upper" description: "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle." type: DT_INT64 } output_arg { name: "band" description: "Rank `k` tensor of the same shape as input. The extracted banded tensor." type_attr: "T" } attr { name: "T" type: "type" } summary: "Copy a tensor setting everything outside a central band in each innermost matrix" description: "to zero.\n\nThe `band` part is computed as follows:\nAssume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\ntensor with the same shape where\n\n`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.\n\nThe indicator function\n\n`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&\n (num_upper < 0 || (n-m) <= num_upper)`.\n\nFor example:\n\n```\n# if \'input\' is [[ 0, 1, 2, 3]\n [-1, 0, 1, 2]\n [-2, -1, 0, 1]\n [-3, -2, -1, 0]],\n\ntf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]\n [-1, 0, 1, 2]\n [ 0, -1, 0, 1]\n [ 0, 0, -1, 0]],\n\ntf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]\n [-1, 0, 1, 0]\n [-2, -1, 0, 1]\n [ 0, -2, -1, 0]]\n```\n\nUseful special cases:\n\n```\n tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.\n tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.\n tf.matrix_band_part(input, 0, 0) ==> Diagonal.\n```" } op { name: "MatrixDeterminant" input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[...]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the determinant of one or more square matrices." description: "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor containing the determinants\nfor all input submatrices `[..., :, :]`." } op { name: "MatrixDiag" input_arg { name: "diagonal" description: "Rank `k`, where `k >= 1`." type_attr: "T" } output_arg { name: "output" description: "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Returns a batched diagonal tensor with a given batched diagonal values." description: "Given a `diagonal`, this operation returns a tensor with the `diagonal` and\neverything else padded with zeros. 
The diagonal is computed as follows:\n\nAssume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a\ntensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:\n\n`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.\n\nFor example:\n\n```\n# \'diagonal\' is [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nand diagonal.shape = (2, 4)\n\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]],\n [[5, 0, 0, 0]\n [0, 6, 0, 0]\n [0, 0, 7, 0]\n [0, 0, 0, 8]]]\n\nwhich has shape (2, 4, 4)\n```" } op { name: "MatrixDiagPart" input_arg { name: "input" description: "Rank `k` tensor where `k >= 2`." type_attr: "T" } output_arg { name: "diagonal" description: "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Returns the batched diagonal part of a batched tensor." description: "This operation returns a tensor with the `diagonal` part\nof the batched `input`. The `diagonal` part is computed as follows:\n\nAssume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\ntensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:\n\n`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\n# \'input\' is [[[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]],\n [[5, 0, 0, 0]\n [0, 6, 0, 0]\n [0, 0, 7, 0]\n [0, 0, 0, 8]]]\n\nand input.shape = (2, 4, 4)\n\ntf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nwhich has shape (2, 4)\n```" } op { name: "MatrixInverse" input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the inverse of one or more square invertible matrices or their" description: "adjoints (conjugate transposes).\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor of the same shape as the input\ncontaining the inverse for all input submatrices `[..., :, :]`.\n\nThe op uses LU decomposition with partial pivoting to compute the inverses.\n\nIf a matrix is not invertible there is no guarantee what the op does. It\nmay detect the condition and raise an exception or it may simply return a\ngarbage result." } op { name: "MatrixSetDiag" input_arg { name: "input" description: "Rank `k+1`, where `k >= 1`." type_attr: "T" } input_arg { name: "diagonal" description: "Rank `k`, where `k >= 1`." type_attr: "T" } output_arg { name: "output" description: "Rank `k+1`, with `output.shape = input.shape`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Returns a batched matrix tensor with new batched diagonal values." description: "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the main diagonal of the\ninnermost matrices. These will be overwritten by the values in `diagonal`.\n\nThe output is computed as follows:\n\nAssume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has\n`k` dimensions `[I, J, K, ..., min(M, N)]`. 
Then the output is a\ntensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:\n\n * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.\n * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`." } op { name: "MatrixSolve" input_arg { name: "matrix" description: "Shape is `[..., M, M]`." type_attr: "T" } input_arg { name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, K]`." type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Solves systems of linear equations." description: "`Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is\na tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix\nsatisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.\nIf `adjoint` is `True` then each output matrix satisfies\n`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`." } op { name: "MatrixSolveLs" input_arg { name: "matrix" description: "Shape is `[..., M, N]`." type_attr: "T" } input_arg { name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T" } input_arg { name: "l2_regularizer" description: "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility" type: DT_DOUBLE } output_arg { name: "output" description: "Shape is `[..., N, K]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "fast" type: "bool" default_value { b: true } } summary: "Solves one or more linear least-squares problems." description: "`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same\ntype as `matrix` and shape `[..., M, K]`.\nThe output is a tensor of shape `[..., N, K]` where each output matrix solves\neach of the equations\n`matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`\nin the least squares sense.\n\nWe use the following notation for (complex) matrix and right-hand sides\nin the batch:\n\n`matrix`=\\\\(A \\in \\mathbb{C}^{m \\times n}\\\\),\n`rhs`=\\\\(B \\in \\mathbb{C}^{m \\times k}\\\\),\n`output`=\\\\(X \\in \\mathbb{C}^{n \\times k}\\\\),\n`l2_regularizer`=\\\\(\\lambda \\in \\mathbb{R}\\\\).\n\nIf `fast` is `True`, then the solution is computed by solving the normal\nequations using Cholesky decomposition. Specifically, if \\\\(m \\ge n\\\\) then\n\\\\(X = (A^H A + \\lambda I)^{-1} A^H B\\\\), which solves the least-squares\nproblem \\\\(X = \\mathrm{argmin}_{Z \\in \\Re^{n \\times k} } ||A Z - B||_F^2 +\n\\lambda ||Z||_F^2\\\\). If \\\\(m \\lt n\\\\) then `output` is computed as\n\\\\(X = A^H (A A^H + \\lambda I)^{-1} B\\\\), which (for \\\\(\\lambda = 0\\\\)) is the\nminimum-norm solution to the under-determined linear system, i.e.\n\\\\(X = \\mathrm{argmin}_{Z \\in \\mathbb{C}^{n \\times k} } ||Z||_F^2 \\\\),\nsubject to \\\\(A Z = B\\\\). 
Notice that the fast path is only numerically stable\nwhen \\\\(A\\\\) is numerically full rank and has a condition number\n\\\\(\\mathrm{cond}(A) \\lt \\frac{1}{\\sqrt{\\epsilon_{mach} } }\\\\) or \\\\(\\lambda\\\\) is\nsufficiently large.\n\nIf `fast` is `False` an algorithm based on the numerically robust complete\northogonal decomposition is used. This computes the minimum-norm\nleast-squares solution, even when \\\\(A\\\\) is rank deficient. This path is\ntypically 6-7 times slower than the fast path. If `fast` is `False` then\n`l2_regularizer` is ignored." } op { name: "MatrixTriangularSolve" input_arg { name: "matrix" description: "Shape is `[..., M, M]`." type_attr: "T" } input_arg { name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, K]`." type_attr: "T" } attr { name: "lower" type: "bool" default_value { b: true } description: "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular." } attr { name: "adjoint" type: "bool" default_value { b: false } description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\n adjoint.\n\n@compatibility(numpy)\nEquivalent to np.linalg.triangular_solve\n@end_compatibility" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Solves systems of linear equations with upper or lower triangular matrices by" description: "backsubstitution.\n\n`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form\nsquare matrices. If `lower` is `True` then the strictly upper triangular part\nof each inner-most matrix is assumed to be zero and not accessed.\nIf `lower` is False then the strictly lower triangular part of each inner-most\nmatrix is assumed to be zero and not accessed.\n`rhs` is a tensor of shape `[..., M, K]`.\n\nThe output is a tensor of shape `[..., M, K]`. If `adjoint` is\n`False` then the innermost matrices in `output` satisfy matrix equations\n`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.\nIf `adjoint` is `True` then the innermost matrices in\n`output` satisfy matrix equations\n`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`." } op { name: "Max" input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the maximum of elements across dimensions of a tensor." description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1."
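# Illustrative sketch (comment only): the Max reduction above matches
# NumPy's max over the given axes; keep_dims corresponds to keepdims:
#
#   import numpy as np
#
#   x = np.array([[1, 4], [3, 2]])
#   np.max(x, axis=1)                 # => [4, 3]    (rank reduced by one)
#   np.max(x, axis=1, keepdims=True)  # => [[4], [3]] (length-1 dim kept)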
} op { name: "MaxPool" input_arg { name: "input" description: "4-D input to pool over." type_attr: "T" } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_QINT8 } } } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } summary: "Performs max pooling on the input." } op { name: "MaxPool3D" input_arg { name: "input" description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over." type_attr: "T" } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]." allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } summary: "Performs 3D max pooling on the input." } op { name: "MaxPool3DGrad" input_arg { name: "orig_input" description: "The original input tensor." type_attr: "TInput" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "TInput" } input_arg { name: "grad" description: "Output backprop of shape `[batch, depth, rows, cols, channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." 
has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]." allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT } } } attr { name: "TInput" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT } } } summary: "Computes gradients of max pooling function." } op { name: "MaxPool3DGradGrad" input_arg { name: "orig_input" description: "The original input tensor." type_attr: "T" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "T" } input_arg { name: "grad" description: "Output backprop of shape `[batch, depth, rows, cols, channels]`." type_attr: "T" } output_arg { name: "output" description: "Gradients of gradients w.r.t. the input to `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } description: "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]." allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } summary: "Computes second-order gradients of the maxpooling function." } op { name: "MaxPoolGrad" input_arg { name: "orig_input" description: "The original input tensor." type_attr: "T" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "T" } input_arg { name: "grad" description: "4-D. Gradients w.r.t. the output of `max_pool`." type_attr: "T" } output_arg { name: "output" description: "Gradients w.r.t. the input to `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. 
With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes gradients of the maxpooling function." } op { name: "MaxPoolGradGrad" input_arg { name: "orig_input" description: "The original input tensor." type_attr: "T" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "T" } input_arg { name: "grad" description: "4-D. Gradients of gradients w.r.t. the input of `max_pool`." type_attr: "T" } output_arg { name: "output" description: "Gradients of gradients w.r.t. the input to `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes second-order gradients of the maxpooling function." } op { name: "MaxPoolGradGradV2" input_arg { name: "orig_input" description: "The original input tensor." type_attr: "T" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "T" } input_arg { name: "grad" description: "4-D. Gradients of gradients w.r.t. the input of `max_pool`." type_attr: "T" } input_arg { name: "ksize" description: "The size of the window for each dimension of the input tensor." type: DT_INT32 } input_arg { name: "strides" description: "The stride of the sliding window for each dimension of the\ninput tensor." type: DT_INT32 } output_arg { name: "output" description: "Gradients of gradients w.r.t. the input to `max_pool`." type_attr: "T" } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." 
allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes second-order gradients of the maxpooling function." } op { name: "MaxPoolGradGradWithArgmax" input_arg { name: "input" description: "The original input." type_attr: "T" } input_arg { name: "grad" description: "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the\ninput of `max_pool`." type_attr: "T" } input_arg { name: "argmax" description: "The indices of the maximum values chosen for each output of `max_pool`." type_attr: "Targmax" } output_arg { name: "output" description: "Gradients of gradients w.r.t. the input of `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "Targmax" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes second-order gradients of the maxpooling function." } op { name: "MaxPoolGradV2" input_arg { name: "orig_input" description: "The original input tensor." type_attr: "T" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "T" } input_arg { name: "grad" description: "4-D. Gradients w.r.t. the output of `max_pool`." type_attr: "T" } input_arg { name: "ksize" description: "The size of the window for each dimension of the input tensor." type: DT_INT32 } input_arg { name: "strides" description: "The stride of the sliding window for each dimension of the\ninput tensor." type: DT_INT32 } output_arg { name: "output" description: "Gradients w.r.t. the input to `max_pool`." type_attr: "T" } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes gradients of the maxpooling function." } op { name: "MaxPoolGradWithArgmax" input_arg { name: "input" description: "The original input." type_attr: "T" } input_arg { name: "grad" description: "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the\noutput of `max_pool`." 
type_attr: "T" } input_arg { name: "argmax" description: "The indices of the maximum values chosen for each output of `max_pool`." type_attr: "Targmax" } output_arg { name: "output" description: "Gradients w.r.t. the input of `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "Targmax" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes gradients of the maxpooling function." } op { name: "MaxPoolV2" input_arg { name: "input" description: "4-D input to pool over." type_attr: "T" } input_arg { name: "ksize" description: "The size of the window for each dimension of the input tensor." type: DT_INT32 } input_arg { name: "strides" description: "The stride of the sliding window for each dimension of the\ninput tensor." type: DT_INT32 } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_QINT8 } } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } summary: "Performs max pooling on the input." } op { name: "MaxPoolWithArgmax" input_arg { name: "input" description: "4-D with shape `[batch, height, width, channels]`. Input to pool over." type_attr: "T" } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } output_arg { name: "argmax" description: "4-D. The flattened indices of the max values chosen for each output." type_attr: "Targmax" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "Targmax" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." 
allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Performs max pooling on the input and outputs both max values and indices." description: "The indices in `argmax` are flattened, so that a maximum value at position\n`[b, y, x, c]` becomes flattened index\n`((b * height + y) * width + x) * channels + c`.\n\nThe indices returned are always in `[0, height) x [0, width)` before flattening,\neven if padding is involved and the mathematically correct answer is outside\n(either negative or too large). This is a bug, but fixing it is difficult to do\nin a safe backwards compatible way, especially due to flattening." } op { name: "Maximum" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Returns the max of x and y (i.e. x > y ? x : y) element-wise." description: "*NOTE*: `Maximum` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "Mean" input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the mean of elements across dimensions of a tensor." description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1." } op { name: "Merge" input_arg { name: "inputs" description: "The input tensors, exactly one of which will become available." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "Will be set to the available input tensor." type_attr: "T" } output_arg { name: "value_index" description: "The index of the chosen input tensor in `inputs`." type: DT_INT32 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } summary: "Forwards the value of an available tensor from `inputs` to `output`." description: "`Merge` waits for at least one of the tensors in `inputs` to become available.\nIt is usually combined with `Switch` to implement branching.\n\n`Merge` forwards the first tensor to become available to `output`, and sets\n`value_index` to its index in `inputs`." } op { name: "MergeSummary" input_arg { name: "inputs" description: "Can be of any shape. 
Each must contain serialized `Summary` protocol\nbuffers." type: DT_STRING number_attr: "N" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } summary: "Merges summaries." description: "This op creates a\n[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\nprotocol buffer that contains the union of all the values in the input\nsummaries.\n\nWhen the Op is run, it reports an `InvalidArgument` error if multiple values\nin the summaries to merge use the same tag." } op { name: "MergeV2Checkpoints" input_arg { name: "checkpoint_prefixes" description: "prefixes of V2 checkpoints to merge." type: DT_STRING } input_arg { name: "destination_prefix" description: "scalar. The desired final prefix. Allowed to be the same\nas one of the checkpoint_prefixes." type: DT_STRING } attr { name: "delete_old_dirs" type: "bool" default_value { b: true } description: "see above." } summary: "V2 format specific: merges the metadata files of sharded checkpoints. The" description: "result is one logical checkpoint, with one physical metadata file and renamed\ndata files.\n\nIntended for \"grouping\" multiple checkpoints in a sharded checkpoint setup.\n\nIf delete_old_dirs is true, attempts to delete recursively the dirname of each\npath in the input checkpoint_prefixes. This is useful when those paths are non\nuser-facing temporary locations." is_stateful: true } op { name: "Mfcc" input_arg { name: "spectrogram" description: "Typically produced by the Spectrogram op, with magnitude_squared\nset to true." type: DT_FLOAT } input_arg { name: "sample_rate" description: "How many samples per second the source audio used." type: DT_INT32 } output_arg { name: "output" type: DT_FLOAT } attr { name: "upper_frequency_limit" type: "float" default_value { f: 4000 } description: "The highest frequency to use when calculating the\ncepstrum." } attr { name: "lower_frequency_limit" type: "float" default_value { f: 20 } description: "The lowest frequency to use when calculating the\ncepstrum." } attr { name: "filterbank_channel_count" type: "int" default_value { i: 40 } description: "Resolution of the Mel bank used internally." } attr { name: "dct_coefficient_count" type: "int" default_value { i: 13 } description: "How many output channels to produce per time slice." } summary: "Transforms a spectrogram into a form that\'s useful for speech recognition." description: "Mel Frequency Cepstral Coefficients are a way of representing audio data that\'s\nbeen effective as an input feature for machine learning. They are created by\ntaking the spectrum of a spectrogram (a \'cepstrum\'), and discarding some of the\nhigher frequencies that are less significant to the human ear. They have a long\nhistory in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum\nis a good resource to learn more." } op { name: "Min" input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1."
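# Illustrative sketch (comment only): the "supports broadcasting" notes on
# the binary element-wise ops here (Maximum, Minimum, Mod, Mul, ...) follow
# NumPy broadcasting rules:
#
#   import numpy as np
#
#   np.minimum(np.array([[1, 5], [7, 3]]), np.array([4]))
#   # => [[1, 4], [4, 3]]; the shape-(1,) operand broadcasts to (2, 2)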
} attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the minimum of elements across dimensions of a tensor." description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1." } op { name: "Minimum" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Returns the min of x and y (i.e. x < y ? x : y) element-wise." description: "*NOTE*: `Minimum` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "MirrorPad" input_arg { name: "input" description: "The input tensor to be padded." type_attr: "T" } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type_attr: "Tpaddings" } output_arg { name: "output" description: "The padded tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "mode" type: "string" description: "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode." allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } summary: "Pads a tensor with mirrored values." description: "This operation pads `input` with mirrored values according to the `paddings`\nyou specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is\nthe rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many values to add before the contents of `input` in that dimension, and\n`paddings[D, 1]` indicates how many values to add after the contents of `input`\nin that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater\nthan `input.dim_size(D)` in `SYMMETRIC` mode (or `input.dim_size(D) - 1` in\n`REFLECT` mode, where the border is not repeated).\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# \'t\' is [[1, 2, 3], [4, 5, 6]].\n# \'paddings\' is [[1, 1], [2, 2]].\n# \'mode\' is SYMMETRIC.\n# rank of \'t\' is 2.\npad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]\n [2, 1, 1, 2, 3, 3, 2]\n [5, 4, 4, 5, 6, 6, 5]\n [5, 4, 4, 5, 6, 6, 5]]\n```" } op { name: "MirrorPadGrad" input_arg { name: "input" description: "The input tensor to be folded." type_attr: "T" } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. 
The number of\nrows must be the same as the rank of `input`." type_attr: "Tpaddings" } output_arg { name: "output" description: "The folded tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "mode" type: "string" description: "The mode used in the `MirrorPad` op." allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } summary: "Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor." description: "This operation folds the padded areas of `input` by `MirrorPad` according to the\n`paddings` you specify. `paddings` must be the same as the `paddings` argument\ngiven to the corresponding `MirrorPad` op.\n\nThe folded size of each dimension D of the output is:\n\n`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`\n\nFor example:\n\n```\n# \'t\' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].\n# \'paddings\' is [[0, 1], [0, 1]].\n# \'mode\' is SYMMETRIC.\n# rank of \'t\' is 2.\npad(t, paddings) ==> [[ 1, 5]\n [11, 28]]\n```" } op { name: "Mod" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns element-wise remainder of division. This emulates C semantics in that" description: "the result here is consistent with a truncating divide. E.g.\n`tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.\n\n*NOTE*: `Mod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "Mul" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns x * y element-wise." description: "*NOTE*: `Mul` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "Multinomial" input_arg { name: "logits" description: "2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes." type_attr: "T" } input_arg { name: "num_samples" description: "0-D. Number of independent samples to draw for each row slice." type: DT_INT32 } output_arg { name: "output" description: "2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`." type: DT_INT64 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Draws samples from a multinomial distribution." 
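# NOTE: a sketch of `MirrorPad` through the public `tf.pad` wrapper (TF 1.x,
# assumed), reproducing the SYMMETRIC example from the description above:
#
#   import tensorflow as tf
#
#   t = tf.constant([[1, 2, 3], [4, 5, 6]])
#   paddings = tf.constant([[1, 1], [2, 2]])
#   out = tf.pad(t, paddings, mode='SYMMETRIC')
#   with tf.Session() as sess:
#       print(sess.run(out))
#   # => [[2 1 1 2 3 3 2]
#   #     [2 1 1 2 3 3 2]
#   #     [5 4 4 5 6 6 5]
#   #     [5 4 4 5 6 6 5]]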
is_stateful: true } op { name: "MutableDenseHashTable" input_arg { name: "empty_key" description: "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations." type_attr: "key_dtype" } output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } attr { name: "value_shape" type: "shape" default_value { shape { } } description: "The shape of each value." } attr { name: "initial_num_buckets" type: "int" default_value { i: 131072 } description: "The initial number of hash table buckets. Must be a power\nof 2." } attr { name: "max_load_factor" type: "float" default_value { f: 0.8 } description: "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1." } summary: "Creates an empty hash table that uses tensors as the backing store." description: "It uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThis op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation." is_stateful: true } op { name: "MutableDenseHashTableV2" input_arg { name: "empty_key" description: "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations." type_attr: "key_dtype" } output_arg { name: "table_handle" description: "Handle to a table." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } attr { name: "value_shape" type: "shape" default_value { shape { } } description: "The shape of each value." } attr { name: "initial_num_buckets" type: "int" default_value { i: 131072 } description: "The initial number of hash table buckets. Must be a power\nof 2." } attr { name: "max_load_factor" type: "float" default_value { f: 0.8 } description: "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1." } summary: "Creates an empty hash table that uses tensors as the backing store." description: "It uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThis op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. 
It does not support the initialization operation." is_stateful: true } op { name: "MutableHashTable" output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } description: "If true and shared_name is empty, the table is shared\nusing the node name." } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } summary: "Creates an empty hash table." description: "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation." is_stateful: true } op { name: "MutableHashTableOfTensors" output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } attr { name: "value_shape" type: "shape" default_value { shape { } } } summary: "Creates an empty hash table." description: "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a vector. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation." is_stateful: true } op { name: "MutableHashTableOfTensorsV2" output_arg { name: "table_handle" description: "Handle to a table." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } attr { name: "value_shape" type: "shape" default_value { shape { } } } summary: "Creates an empty hash table." description: "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a vector. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation." is_stateful: true } op { name: "MutableHashTableV2" output_arg { name: "table_handle" description: "Handle to a table." 
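# NOTE: the MutableHashTable* ops are not usually constructed by hand; in
# TF 1.x they are surfaced through `tf.contrib.lookup` (an assumption --
# contrib APIs move between releases). A hedged sketch:
#
#   import tensorflow as tf
#
#   table = tf.contrib.lookup.MutableHashTable(
#       key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
#   insert = table.insert(tf.constant(['a', 'b']),
#                         tf.constant([1, 2], dtype=tf.int64))
#   lookup = table.lookup(tf.constant(['a', 'missing']))
#   with tf.Session() as sess:
#       sess.run(insert)
#       print(sess.run(lookup))  # => [ 1 -1]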
type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } description: "If true and shared_name is empty, the table is shared\nusing the node name." } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } summary: "Creates an empty hash table." description: "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation." is_stateful: true } op { name: "Neg" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes numerical negative value element-wise." description: "I.e., \\\\(y = -x\\\\)." } op { name: "NegTrain" input_arg { name: "w_in" description: "input word embedding." type: DT_FLOAT is_ref: true } input_arg { name: "w_out" description: "output word embedding." type: DT_FLOAT is_ref: true } input_arg { name: "examples" description: "A vector of word ids." type: DT_INT32 } input_arg { name: "labels" description: "A vector of word ids." type: DT_INT32 } input_arg { name: "lr" type: DT_FLOAT } attr { name: "vocab_count" type: "list(int)" description: "Count of words in the vocabulary." } attr { name: "num_negative_samples" type: "int" description: "Number of negative samples per example." } summary: "Training via negative sampling." deprecation { version: 19 explanation: "Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result" } is_stateful: true } op { name: "NextIteration" input_arg { name: "data" description: "The tensor to be made available to the next iteration." type_attr: "T" } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Makes its input available to the next iteration." } op { name: "NoOp" summary: "Does nothing. Only useful as a placeholder for control edges." } op { name: "NonMaxSuppression" input_arg { name: "boxes" description: "A 2-D float tensor of shape `[num_boxes, 4]`." type: DT_FLOAT } input_arg { name: "scores" description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)." type: DT_FLOAT } input_arg { name: "max_output_size" description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression." type: DT_INT32 } output_arg { name: "selected_indices" description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`." type: DT_INT32 } attr { name: "iou_threshold" type: "float" default_value { f: 0.5 } description: "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU." 
} summary: "Greedily selects a subset of bounding boxes in descending order of score," description: "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Note that this\nalgorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. For example:\n selected_indices = tf.image.non_max_suppression(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)" } op { name: "NonMaxSuppressionV2" input_arg { name: "boxes" description: "A 2-D float tensor of shape `[num_boxes, 4]`." type: DT_FLOAT } input_arg { name: "scores" description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)." type: DT_FLOAT } input_arg { name: "max_output_size" description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression." type: DT_INT32 } input_arg { name: "iou_threshold" description: "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU." type: DT_FLOAT } output_arg { name: "selected_indices" description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`." type: DT_INT32 } summary: "Greedily selects a subset of bounding boxes in descending order of score," description: "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Note that this\nalgorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\n\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. 
For example:\n\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)" } op { name: "NotEqual" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_QUINT8 type: DT_QINT8 type: DT_QINT32 type: DT_STRING type: DT_BOOL type: DT_COMPLEX128 } } } summary: "Returns the truth value of (x != y) element-wise." description: "*NOTE*: `NotEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "NthElement" input_arg { name: "input" description: "1-D or higher with last dimension at least `n+1`." type_attr: "T" } input_arg { name: "n" description: "0-D. Position of sorted vector to select along the last dimension (along\neach row for matrices). Valid range of n is `[0, input.shape[-1])`" type: DT_INT32 } output_arg { name: "values" description: "The `n`-th order statistic along each last dimensional slice." type_attr: "T" } attr { name: "reverse" type: "bool" default_value { b: false } description: "When set to True, find the nth-largest value in the vector instead of the\nnth-smallest." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Finds values of the `n`-th order statistic for the last dimension." description: "If the input is a vector (rank-1), finds the entry which is the nth-smallest\nvalue in the vector and outputs its value as a scalar tensor.\n\nFor matrices (resp. higher rank input), computes the entry which is the\nnth-smallest value in each row (resp. vector along the last dimension). Thus,\n\n values.shape = input.shape[:-1]" } op { name: "OneHot" input_arg { name: "indices" description: "A tensor of indices." type_attr: "TI" } input_arg { name: "depth" description: "A scalar defining the depth of the one hot dimension." type: DT_INT32 } input_arg { name: "on_value" description: "A scalar defining the value to fill in output when `indices[j] = i`." type_attr: "T" } input_arg { name: "off_value" description: "A scalar defining the value to fill in output when `indices[j] != i`." type_attr: "T" } output_arg { name: "output" description: "The one-hot tensor." type_attr: "T" } attr { name: "axis" type: "int" default_value { i: -1 } description: "The axis to fill (default: -1, a new inner-most axis)." } attr { name: "T" type: "type" } attr { name: "TI" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 } } } summary: "Returns a one-hot tensor." 
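# NOTE: a small sketch of the NumPy-style broadcasting that `Maximum`,
# `Minimum`, `Mod`, `Mul` and `NotEqual` above all share (TF 1.x, assumed):
#
#   import tensorflow as tf
#
#   x = tf.constant([[1, 2], [3, 4]])
#   y = tf.constant([3])                # broadcast against every element of x
#   ne = tf.not_equal(x, y)             # => [[True, True], [False, True]]
#   mx = tf.maximum(x, y)               # => [[3, 3], [3, 4]]
#   with tf.Session() as sess:
#       print(sess.run([ne, mx]))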
description: "The locations represented by indices in `indices` take value `on_value`,\nwhile all other locations take value `off_value`.\n\nIf the input `indices` is rank `N`, the output will have rank `N+1`,\nThe new axis is created at dimension `axis` (default: the new axis is\nappended at the end).\n\nIf `indices` is a scalar the output shape will be a vector of length `depth`.\n\nIf `indices` is a vector of length `features`, the output shape will be:\n```\n features x depth if axis == -1\n depth x features if axis == 0\n```\n\nIf `indices` is a matrix (batch) with shape `[batch, features]`,\nthe output shape will be:\n```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n```\n\n\nExamples\n=========\n\nSuppose that\n\n```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 5.0\n off_value = 0.0\n axis = -1\n```\n\nThen output is `[4 x 3]`:\n\n ```output =\n [5.0 0.0 0.0] // one_hot(0)\n [0.0 0.0 5.0] // one_hot(2)\n [0.0 0.0 0.0] // one_hot(-1)\n [0.0 5.0 0.0] // one_hot(1)\n ```\n\nSuppose that\n\n```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 0.0\n off_value = 3.0\n axis = 0\n```\n\nThen output is `[3 x 4]`:\n\n ```output =\n [0.0 3.0 3.0 3.0]\n [3.0 3.0 3.0 0.0]\n [3.0 3.0 3.0 3.0]\n [3.0 0.0 3.0 3.0]\n // ^ one_hot(0)\n // ^ one_hot(2)\n // ^ one_hot(-1)\n // ^ one_hot(1)\n ```\nSuppose that\n\n```\n indices = [[0, 2], [1, -1]]\n depth = 3\n on_value = 1.0\n off_value = 0.0\n axis = -1\n```\n\nThen output is `[2 x 2 x 3]`:\n\n ```output =\n [\n [1.0, 0.0, 0.0] // one_hot(0)\n [0.0, 0.0, 1.0] // one_hot(2)\n ][\n [0.0, 1.0, 0.0] // one_hot(1)\n [0.0, 0.0, 0.0] // one_hot(-1)\n ]```" } op { name: "OneShotIterator" output_arg { name: "handle" description: "A handle to the iterator that can be passed to an \"IteratorGetNext\"\nop." type: DT_RESOURCE } attr { name: "dataset_factory" type: "func" description: "A function of type `() -> DT_VARIANT`, where the returned\nDT_VARIANT is a dataset." } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Makes a \"one-shot\" iterator that can be iterated only once." description: "A one-shot iterator bundles the logic for defining the dataset and\nthe state of the iterator in a single op, which allows simple input\npipelines to be defined without an additional initialization\n(\"MakeIterator\") step.\n\nOne-shot iterators have the following limitations:\n\n* They do not support parameterization: all logic for creating the underlying\n dataset must be bundled in the `dataset_factory` function.\n* They are not resettable. Once a one-shot iterator reaches the end of its\n underlying dataset, subsequent \"IteratorGetNext\" operations on that\n iterator will always produce an `OutOfRange` error.\n\nFor greater flexibility, use \"Iterator\" and \"MakeIterator\" to define\nan iterator using an arbitrary subgraph, which may capture tensors\n(including fed values) as parameters, and which may be reset multiple\ntimes by rerunning \"MakeIterator\"." is_stateful: true } op { name: "OnesLike" input_arg { name: "x" description: "a tensor of type T." type_attr: "T" } output_arg { name: "y" description: "a tensor of the same shape and type as x but filled with ones." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns a tensor of ones with the same shape and type as x." } op { name: "OrderedMapClear" attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes all elements in the underlying container." is_stateful: true } op { name: "OrderedMapIncompleteSize" output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op returns the number of incomplete elements in the underlying container." is_stateful: true } op { name: "OrderedMapPeek" input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op peeks at the values at the specified key. If the" description: "underlying container does not contain this key\nthis op will block until it does. This Op is optimized for\nperformance." is_stateful: true } op { name: "OrderedMapSize" output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op returns the number of elements in the underlying container." is_stateful: true } op { name: "OrderedMapStage" input_arg { name: "key" description: "int64" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "values" description: "a list of tensors\ndtypes A list of data types that inserted values should adhere to." type_list_attr: "fake_dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } description: "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached." has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "fake_dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "It is necessary to match this name to the matching Unstage Op." 
} summary: "Stage (key, values) in the underlying container which behaves like a ordered" description: "associative container. Elements are ordered by key." is_stateful: true } op { name: "OrderedMapUnstage" input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes and returns the values associated with the key" description: "from the underlying container. If the underlying container\ndoes not contain this key, the op will block until it does." is_stateful: true } op { name: "OrderedMapUnstageNoKey" input_arg { name: "indices" type: DT_INT32 } output_arg { name: "key" type: DT_INT64 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes and returns the (key, value) element with the smallest" description: "key from the underlying container. If the underlying container\ndoes not contain elements, the op will block until it does." is_stateful: true } op { name: "Pack" input_arg { name: "values" description: "Must be of same shape and type." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "The packed tensor." type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "axis" type: "int" default_value { i: 0 } description: "Dimension along which to pack. Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`." } summary: "Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor." description: "Packs the `N` tensors in `values` into a tensor with rank one higher than each\ntensor in `values`, by packing them along the `axis` dimension.\nGiven a list of tensors of shape `(A, B, C)`;\n\nif `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\nif `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\nEtc.\n\nFor example:\n\n```\n# \'x\' is [1, 4]\n# \'y\' is [2, 5]\n# \'z\' is [3, 6]\npack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\npack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]\n```\n\nThis is the opposite of `unpack`." } op { name: "Pad" input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Pads a tensor with zeros." description: "This operation pads a `input` with zeros according to the `paddings` you\nspecify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the\nrank of `input`. 
For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many zeros to add before the contents of `input` in that dimension, and\n`paddings[D, 1]` indicates how many zeros to add after the contents of `input`\nin that dimension.\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# \'t\' is [[1, 1], [2, 2]]\n# \'paddings\' is [[1, 1], [2, 2]]\n# rank of \'t\' is 2\npad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\n [0, 0, 1, 1, 0, 0]\n [0, 0, 2, 2, 0, 0]\n [0, 0, 0, 0, 0, 0]]\n```" } op { name: "PadV2" input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } input_arg { name: "constant_values" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Pads a tensor." description: "This operation pads `input` according to the `paddings` and `constant_values`\nyou specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is\nthe rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many padding values to add before the contents of `input` in that dimension,\nand `paddings[D, 1]` indicates how many padding values to add after the contents\nof `input` in that dimension. `constant_values` is a scalar tensor of the same\ntype as `input` that indicates the value to use for padding `input`.\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# \'t\' is [[1, 1], [2, 2]]\n# \'paddings\' is [[1, 1], [2, 2]]\n# \'constant_values\' is 0\n# rank of \'t\' is 2\npad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\n [0, 0, 1, 1, 0, 0]\n [0, 0, 2, 2, 0, 0]\n [0, 0, 0, 0, 0, 0]]\n```" } op { name: "PaddedBatchDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "batch_size" description: "A scalar representing the number of elements to accumulate in a\nbatch." type: DT_INT64 } input_arg { name: "padded_shapes" description: "A list of int64 tensors representing the desired padded shapes\nof the corresponding output components. These shapes may be partially\nspecified, using `-1` to indicate that a particular dimension should be\npadded to the maximum size of all batch elements." type: DT_INT64 number_attr: "N" } input_arg { name: "padding_values" description: "A list of scalars containing the padding value to use for\neach of the outputs." type_list_attr: "Toutput_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "N" type: "int" has_minimum: true minimum: 1 } summary: "Creates a dataset that batches and pads `batch_size` elements from the input." } op { name: "PaddingFIFOQueue" output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. 
The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that produces elements in first-in first-out order." description: "Variable-size shapes are allowed by setting the corresponding shape dimensions\nto -1 in the shapes attr. In this case DequeueMany will pad up to the maximum\nsize of any given element in the minibatch. See the shapes attr for details." is_stateful: true } op { name: "PaddingFIFOQueueV2" output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that produces elements in first-in first-out order." description: "Variable-size shapes are allowed by setting the corresponding shape dimensions\nto -1 in the shapes attr. In this case DequeueMany will pad up to the maximum\nsize of any given element in the minibatch. See the shapes attr for details." is_stateful: true } op { name: "ParallelConcat" input_arg { name: "values" description: "Tensors to be concatenated. All must have size 1 in the first dimension\nand same shape." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "The concatenated tensor." 
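# NOTE: a hedged sketch of `PaddedBatchDataset` via `Dataset.padded_batch`
# (TF 1.x `tf.data` API, assumed), padding ragged rows with zeros to the
# longest element in each batch:
#
#   import tensorflow as tf
#
#   ds = tf.data.Dataset.range(1, 4)               # 1, 2, 3
#   ds = ds.map(lambda n: tf.fill([n], n))         # [1], [2 2], [3 3 3]
#   ds = ds.padded_batch(2, padded_shapes=[None])  # -1/unknown dim => pad
#   nxt = ds.make_one_shot_iterator().get_next()
#   with tf.Session() as sess:
#       print(sess.run(nxt))  # => [[1 0] [2 2]]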
type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "shape" type: "shape" description: "the final shape of the result; should be equal to the shapes of any input\nbut with the number of input values in the first dimension." } summary: "Concatenates a list of `N` tensors along the first dimension." description: "The input tensors are all required to have size 1 in the first dimension.\n\nFor example:\n\n```\n# \'x\' is [[1, 4]]\n# \'y\' is [[2, 5]]\n# \'z\' is [[3, 6]]\nparallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\n```\n\nThe difference between concat and parallel_concat is that concat requires all\nof the inputs be computed before the operation will begin but doesn\'t require\nthat the input shapes be known during graph construction. Parallel concat\nwill copy pieces of the input into the output as they become available, in\nsome situations this can provide a performance benefit." } op { name: "ParallelDynamicStitch" input_arg { name: "indices" type: DT_INT32 number_attr: "N" } input_arg { name: "data" type_attr: "T" number_attr: "N" } output_arg { name: "merged" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } summary: "Interleave the values from the `data` tensors into a single tensor." description: "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] = data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices)] + constant\n\nValues may be merged in parallel, so if an index appears in both `indices[m][i]`\nand `indices[n][j]`, the result may be invalid. This differs from the normal\nDynamicStitch operator that defines the behavior in that case.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated on the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # apply (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```\n\n
" } op { name: "ParallelInterleaveDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" type_list_attr: "Targuments" } input_arg { name: "cycle_length" type: DT_INT64 } input_arg { name: "block_length" type: DT_INT64 } input_arg { name: "sloppy" type: DT_BOOL } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" description: "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`." } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`." description: "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from a variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! This dataset is not deterministic!" } op { name: "ParallelMapDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "other_arguments" type_list_attr: "Targuments" } input_arg { name: "num_parallel_calls" description: "The number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel." type: DT_INT32 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`." description: "Unlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `num_parallel_calls` copies of `f` in parallel." } op { name: "ParameterizedTruncatedNormal" input_arg { name: "shape" description: "The shape of the output tensor. Batches are indexed by the 0th dimension." type_attr: "T" } input_arg { name: "means" description: "The mean parameter of each batch." type_attr: "dtype" } input_arg { name: "stdevs" description: "The standard deviation parameter of each batch. Must be greater than 0." type_attr: "dtype" } input_arg { name: "minvals" description: "The minimum cutoff. May be -infinity." type_attr: "dtype" } input_arg { name: "maxvals" description: "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch." type_attr: "dtype" } output_arg { name: "output" description: "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." 
allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs random values from a normal distribution. The parameters may each be a" description: "scalar which applies to the entire output, or a vector of length shape[0] which\nstores the parameters for each batch." is_stateful: true } op { name: "ParseExample" input_arg { name: "serialized" description: "A vector containing a batch of binary serialized Example protos." type: DT_STRING } input_arg { name: "names" description: "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos. These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this vector must be the same length as \"serialized\"." type: DT_STRING } input_arg { name: "sparse_keys" description: "A list of Nsparse string Tensors (scalars).\nThe keys expected in the Examples\' features associated with sparse values." type: DT_STRING number_attr: "Nsparse" } input_arg { name: "dense_keys" description: "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples\' features associated with dense values." type: DT_STRING number_attr: "Ndense" } input_arg { name: "dense_defaults" description: "A list of Ndense Tensors (some may be empty).\ndense_defaults[j] provides default values\nwhen the example\'s feature_map lacks dense_key[j]. If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it\'s empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element." type_list_attr: "Tdense" } output_arg { name: "sparse_indices" type: DT_INT64 number_attr: "Nsparse" } output_arg { name: "sparse_values" type_list_attr: "sparse_types" } output_arg { name: "sparse_shapes" type: DT_INT64 number_attr: "Nsparse" } output_arg { name: "dense_values" type_list_attr: "Tdense" } attr { name: "Nsparse" type: "int" has_minimum: true } attr { name: "Ndense" type: "int" has_minimum: true } attr { name: "sparse_types" type: "list(type)" description: "A list of Nsparse types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)." has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tdense" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "dense_shapes" type: "list(shape)" description: "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN). 
In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input. Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension." has_minimum: true } summary: "Transforms a vector of brain.Example protos (as strings) into typed tensors." } op { name: "ParseSingleSequenceExample" input_arg { name: "serialized" description: "A scalar containing a binary serialized SequenceExample proto." type: DT_STRING } input_arg { name: "feature_list_dense_missing_assumed_empty" description: "A vector listing the\nFeatureList keys which may be missing from the SequenceExample. If the\nassociated FeatureList is missing, it is treated as empty. By default,\nany FeatureList not listed in this vector must exist in the SequenceExample." type: DT_STRING } input_arg { name: "context_sparse_keys" description: "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples\' features associated with context_sparse\nvalues." type: DT_STRING number_attr: "Ncontext_sparse" } input_arg { name: "context_dense_keys" description: "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples\' context features associated with\ndense values." type: DT_STRING number_attr: "Ncontext_dense" } input_arg { name: "feature_list_sparse_keys" description: "A list of Nfeature_list_sparse string Tensors\n(scalars). The keys expected in the FeatureLists associated with sparse\nvalues." type: DT_STRING number_attr: "Nfeature_list_sparse" } input_arg { name: "feature_list_dense_keys" description: "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples\' feature_lists associated\nwith lists of dense values." type: DT_STRING number_attr: "Nfeature_list_dense" } input_arg { name: "context_dense_defaults" description: "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample\'s context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it\'s\nempty. If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j]." type_list_attr: "Tcontext_dense" } input_arg { name: "debug_name" description: "A scalar containing the name of the serialized proto.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty scalar if no name is available." 
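# NOTE: a hedged sketch of `ParseExample` above through the TF 1.x wrapper
# `tf.parse_example`; the feature keys ('age', 'tags') are hypothetical:
#
#   import tensorflow as tf
#
#   serialized = tf.placeholder(tf.string, shape=[None])  # batch of Examples
#   features = tf.parse_example(serialized, {
#       # dense feature: missing entries fall back to the default
#       'age': tf.FixedLenFeature([1], tf.int64, default_value=[-1]),
#       # variable-length feature: parsed into a SparseTensor
#       'tags': tf.VarLenFeature(tf.string),
#   })
#   # features['age'] has shape [batch, 1]; features['tags'] is sparse.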
type: DT_STRING } output_arg { name: "context_sparse_indices" type: DT_INT64 number_attr: "Ncontext_sparse" } output_arg { name: "context_sparse_values" type_list_attr: "context_sparse_types" } output_arg { name: "context_sparse_shapes" type: DT_INT64 number_attr: "Ncontext_sparse" } output_arg { name: "context_dense_values" type_list_attr: "Tcontext_dense" } output_arg { name: "feature_list_sparse_indices" type: DT_INT64 number_attr: "Nfeature_list_sparse" } output_arg { name: "feature_list_sparse_values" type_list_attr: "feature_list_sparse_types" } output_arg { name: "feature_list_sparse_shapes" type: DT_INT64 number_attr: "Nfeature_list_sparse" } output_arg { name: "feature_list_dense_values" type_list_attr: "feature_list_dense_types" } attr { name: "Ncontext_sparse" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Ncontext_dense" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Nfeature_list_sparse" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Nfeature_list_dense" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "context_sparse_types" type: "list(type)" default_value { list { } } description: "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)." has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tcontext_dense" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "feature_list_dense_types" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "context_dense_shapes" type: "list(shape)" default_value { list { } } description: "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j]." has_minimum: true } attr { name: "feature_list_sparse_types" type: "list(type)" default_value { list { } } description: "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)." has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "feature_list_dense_shapes" type: "list(shape)" default_value { list { } } description: "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries()." has_minimum: true } summary: "Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors." } op { name: "ParseTensor" input_arg { name: "serialized" description: "A scalar string containing a serialized TensorProto proto." type: DT_STRING } output_arg { name: "output" description: "A Tensor of type `out_type`." 
type_attr: "out_type" } attr { name: "out_type" type: "type" description: "The type of the serialized tensor. The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place." } summary: "Transforms a serialized tensorflow.TensorProto proto into a Tensor." } op { name: "Placeholder" output_arg { name: "output" description: "A placeholder tensor that must be replaced using the feed mechanism." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of elements in the tensor." } attr { name: "shape" type: "shape" default_value { shape { unknown_rank: true } } description: "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained." } summary: "A placeholder op for a value that will be fed into the computation." description: "N.B. This operation will fail with an error if it is executed. It is\nintended as a way to represent a value that will always be fed, and to\nprovide attrs that enable the fed value to be checked at runtime." } op { name: "PlaceholderV2" output_arg { name: "output" description: "A placeholder tensor that must be replaced using the feed mechanism." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of elements in the tensor." } attr { name: "shape" type: "shape" description: "The shape of the tensor. The shape can be any partially-specified\nshape. To be unconstrained, pass in a shape with unknown rank." } summary: "A placeholder op for a value that will be fed into the computation." description: "N.B. This operation will fail with an error if it is executed. It is\nintended as a way to represent a value that will always be fed, and to\nprovide attrs that enable the fed value to be checked at runtime." deprecation { version: 23 explanation: "Placeholder now behaves the same as PlaceholderV2." } } op { name: "PlaceholderWithDefault" input_arg { name: "input" description: "The default value to produce when `output` is not fed." type_attr: "dtype" } output_arg { name: "output" description: "A placeholder tensor that defaults to `input` if it is not fed." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of elements in the tensor." } attr { name: "shape" type: "shape" description: "The (possibly partial) shape of the tensor." } summary: "A placeholder op that passes through `input` when its output is not fed." } op { name: "Polygamma" input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Compute the polygamma function \\\\(\\psi^{(n)}(x)\\\\)." description: "The polygamma function is defined as:\n\n\n\\\\(\\psi^{(n)}(x) = \\frac{d^n}{dx^n} \\psi(x)\\\\)\n\nwhere \\\\(\\psi(x)\\\\) is the digamma function." } op { name: "PopulationCount" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes element-wise population count (a.k.a. popcount, bitsum, bitcount)." 
description: "For each entry in `x`, calculates the number of `1` (on) bits in the binary\nrepresentation of that entry.\n\n**NOTE**: It is more efficient to first `tf.bitcast` your tensors into\n`int32` or `int64` and perform the bitcount on the result, than to feed in\n8- or 16-bit inputs and then aggregate the resulting counts." } op { name: "Pow" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the power of one value to another." description: "Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\ncorresponding elements in `x` and `y`. For example:\n\n```\n# tensor \'x\' is [[2, 2]], [3, 3]]\n# tensor \'y\' is [[8, 16], [2, 3]]\ntf.pow(x, y) ==> [[256, 65536], [9, 27]]\n```" } op { name: "PrefetchDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "buffer_size" description: "The maximum number of elements to buffer in an iterator over\nthis dataset." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that asynchronously prefetches elements from `input_dataset`." } op { name: "PreventGradient" input_arg { name: "input" description: "any tensor." type_attr: "T" } output_arg { name: "output" description: "the same input tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "message" type: "string" default_value { s: "" } description: "Will be printed in the error when anyone tries to differentiate\nthis operation." } summary: "An identity op that triggers an error if a gradient is requested." description: "When executed in a graph, this op outputs its input tensor as-is.\n\nWhen building ops to compute gradients, the TensorFlow gradient system\nwill return an error when trying to lookup the gradient of this op,\nbecause no gradient must ever be registered for this function. This\nop exists to prevent subtle bugs from silently returning unimplemented\ngradients in some corner cases." } op { name: "Print" input_arg { name: "input" description: "The tensor passed to `output`" type_attr: "T" } input_arg { name: "data" description: "A list of tensors to print out when op is evaluated." type_list_attr: "U" } output_arg { name: "output" description: "= The unmodified `input` tensor" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "U" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "message" type: "string" default_value { s: "" } description: "A string, prefix of the error message." } attr { name: "first_n" type: "int" default_value { i: -1 } description: "Only log `first_n` number of times. -1 disables logging." } attr { name: "summarize" type: "int" default_value { i: 3 } description: "Only print this many entries of each tensor." } summary: "Prints a list of tensors." description: "Passes `input` through to `output` and prints `data` when evaluating." is_stateful: true } op { name: "PriorityQueue" output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" default_value { list { } } description: "The type of each component in a value." 
has_minimum: true } attr { name: "shapes" type: "list(shape)" description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that produces elements sorted by the first component value." description: "Note that the PriorityQueue requires the first component of any element\nto be a scalar int64, in addition to the other elements declared by\ncomponent_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue\nand DequeueMany) on a PriorityQueue will all require (resp. output) one extra\nentry in their input (resp. output) lists." is_stateful: true } op { name: "PriorityQueueV2" output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" default_value { list { } } description: "The type of each component in a value." has_minimum: true } attr { name: "shapes" type: "list(shape)" description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that produces elements sorted by the first component value." description: "Note that the PriorityQueue requires the first component of any element\nto be a scalar int64, in addition to the other elements declared by\ncomponent_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue\nand DequeueMany) on a PriorityQueue will all require (resp. output) one extra\nentry in their input (resp. output) lists." is_stateful: true } op { name: "Prod" input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." 
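# A minimal sketch of the PriorityQueue above via the TF 1.x wrapper
# tf.PriorityQueue; note the extra leading scalar int64 priority component on
# both enqueue and dequeue, as required by this op:
#
#   import tensorflow as tf
#   q = tf.PriorityQueue(capacity=10, types=[tf.string], shapes=[[]])
#   enq = q.enqueue((tf.constant(2, dtype=tf.int64), tf.constant("second")))
#   priority, value = q.dequeue()   # elements come out ordered by priority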
} attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the product of elements across dimensions of a tensor." description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1." } op { name: "PyFunc" input_arg { name: "input" description: "List of Tensors that will provide input to the Op." type_list_attr: "Tin" } output_arg { name: "output" description: "The outputs from the Op." type_list_attr: "Tout" } attr { name: "token" type: "string" description: "A token representing a registered python function in this address space." } attr { name: "Tin" type: "list(type)" description: "Data types of the inputs to the op." has_minimum: true } attr { name: "Tout" type: "list(type)" description: "Data types of the outputs from the op.\nThe length of the list specifies the number of outputs." has_minimum: true } summary: "Invokes a python function to compute func(input)->output." description: "This operation is considered stateful. For a stateless version, see\nPyFuncStateless." is_stateful: true } op { name: "PyFuncStateless" input_arg { name: "input" type_list_attr: "Tin" } output_arg { name: "output" type_list_attr: "Tout" } attr { name: "token" type: "string" } attr { name: "Tin" type: "list(type)" has_minimum: true } attr { name: "Tout" type: "list(type)" has_minimum: true } summary: "A stateless version of PyFunc." } op { name: "Qr" input_arg { name: "input" description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`." type_attr: "T" } output_arg { name: "q" description: "Orthonormal basis for range of `a`. If `full_matrices` is `False` then\nshape is `[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`." type_attr: "T" } output_arg { name: "r" description: "Triangular factor. If `full_matrices` is `False` then shape is\n`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`." type_attr: "T" } attr { name: "full_matrices" type: "bool" default_value { b: false } description: "If true, compute full-sized `q` and `r`. If false\n(the default), compute only the leading `P` columns of `q`." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the QR decompositions of one or more matrices." 
description: "Computes the QR decomposition of each inner matrix in `tensor` such that\n`tensor[..., :, :] = q[..., :, :] * r[..., :,:])`\n\n```python\n# a is a tensor.\n# q is a tensor of orthonormal matrices.\n# r is a tensor of upper triangular matrices.\nq, r = qr(a)\nq_full, r_full = qr(a, full_matrices=True)\n```" } op { name: "QuantizeAndDequantize" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "range_given" type: "bool" default_value { b: false } } attr { name: "input_min" type: "float" default_value { f: 0 } } attr { name: "input_max" type: "float" default_value { f: 0 } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Use QuantizeAndDequantizeV2 instead." deprecation { version: 22 explanation: "Replaced by QuantizeAndDequantizeV2" } } op { name: "QuantizeAndDequantizeV2" input_arg { name: "input" description: "Tensor to quantize and then dequantize." type_attr: "T" } input_arg { name: "input_min" description: "If range_given, this is the min of the range, otherwise this input\nwill be ignored." type_attr: "T" } input_arg { name: "input_max" description: "If range_given, this is the max of the range, otherwise this input\nwill be ignored." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } description: "If the quantization is signed or unsigned." } attr { name: "num_bits" type: "int" default_value { i: 8 } description: "The bitwidth of the quantization." } attr { name: "range_given" type: "bool" default_value { b: false } description: "If the range is given or should be computed from the tensor." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Quantizes then dequantizes a tensor." description: "This op simulates the precision loss from the quantized forward pass by:\n1. Quantizing the tensor to fixed point numbers, which should match the target\n quantization method when it is used in inference.\n2. Dequantizing it back to floating point numbers for the following ops, most\n likely matmul.\n\nThere are different ways to quantize. This version does not use the full range\nof the output type, choosing to elide the lowest possible value for symmetry\n(e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit\nquantization), so that 0.0 maps to 0.\n\nTo perform this op, we first find the range of values in our tensor. The range\nwe use is always centered on 0, so we find m such that\n\n1. m = max(abs(input_min), abs(input_max)) if range_given is true,\n2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.\n\nOur input tensor range is then [-m, m].\n\nNext, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].\nIf signed_input is true, this is\n\n [min_fixed, max_fixed ] =\n [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1].\n\nOtherwise, if signed_input is false, the fixed-point range is\n\n [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].\n\nFrom this we compute our scaling factor, s:\n\n s = (max_fixed - min_fixed) / (2 * m).\n\nNow we can quantize and dequantize the elements of our tensor. An element e\nis transformed into e\':\n\n e\' = (e * s).round_to_nearest() / s.\n\nNote that we have a different number of buckets in the signed vs. unsigned\ncases. 
For example, if num_bits == 8, we get 254 buckets in the signed case\nvs. 255 in the unsigned case.\n\nFor example, suppose num_bits = 8 and m = 1. Then\n\n [min_fixed, max_fixed] = [-127, 127], and\n s = (127 + 127) / 2 = 127.\n\nGiven the vector {-1, -0.5, 0, 0.3}, this is quantized to\n{-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}." } op { name: "QuantizeAndDequantizeV3" input_arg { name: "input" type_attr: "T" } input_arg { name: "input_min" type_attr: "T" } input_arg { name: "input_max" type_attr: "T" } input_arg { name: "num_bits" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } } attr { name: "range_given" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Quantizes then dequantizes a tensor." description: "This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a\ntensor, so its value can change during training." } op { name: "QuantizeDownAndShrinkRange" input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" description: "The float value that the minimum quantized input value represents." type: DT_FLOAT } input_arg { name: "input_max" description: "The float value that the maximum quantized input value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" description: "The float value that the minimum quantized output value represents." type: DT_FLOAT } output_arg { name: "output_max" description: "The float value that the maximum quantized output value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" description: "The type of the input." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" description: "The type of the output. Should be a lower bit depth than Tinput." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the" description: "actual distribution of the values to maximize the usage of the lower bit depth\nand adjusting the output min and max ranges accordingly.\n\n[input_min, input_max] are scalar floats that specify the range for the float\ninterpretation of the \'input\' data. For example, if input_min is -1.0f and\ninput_max is 1.0f, and we are dealing with quint16 quantized data, then a 0\nvalue in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.\n\nThis operator tries to squeeze as much precision as possible into an output with\na lower bit depth by calculating the actual min and max values found in the\ndata. For example, maybe that quint16 input has no values lower than 16,384 and\nnone higher than 49,152. That means only half the range is actually needed, all\nthe float interpretations are between -0.5f and 0.5f, so if we want to compress\nthe data into a quint8 output, we can use that range rather than the theoretical\n-1.0f to 1.0f that is suggested by the input min and max.\n\nIn practice, this is most useful for taking output from operations like\nQuantizedMatMul that can produce higher bit-depth outputs than their inputs and\nmay have large potential output ranges, but in practice have a distribution of\ninput values that only uses a small fraction of the possible range. 
By feeding\nthat output into this operator, we can reduce it from 32 bits down to 8 with\nminimal loss of accuracy." } op { name: "QuantizeV2" input_arg { name: "input" type: DT_FLOAT } input_arg { name: "min_range" description: "The minimum scalar value possibly produced for the input." type: DT_FLOAT } input_arg { name: "max_range" description: "The maximum scalar value possibly produced for the input." type: DT_FLOAT } output_arg { name: "output" description: "The quantized data produced from the float input." type_attr: "T" } output_arg { name: "output_min" description: "The actual minimum scalar value used for the output." type: DT_FLOAT } output_arg { name: "output_max" description: "The actual maximum scalar value used for the output." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "mode" type: "string" default_value { s: "MIN_COMBINED" } allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" s: "SCALED" } } } attr { name: "round_mode" type: "string" default_value { s: "HALF_AWAY_FROM_ZERO" } allowed_values { list { s: "HALF_AWAY_FROM_ZERO" s: "HALF_TO_EVEN" } } } summary: "Quantize the \'input\' tensor of type float to \'output\' tensor of type \'T\'." description: "[min_range, max_range] are scalar floats that specify the range for\nthe \'input\' data. The \'mode\' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents. The\n\'round_mode\' attribute controls which rounding tie-breaking algorithm is used\nwhen rounding float values to their quantized equivalents.\n\nIn \'MIN_COMBINED\' mode, each value of the tensor will undergo the following:\n\n```\nout[i] = (in[i] - min_range) * range(T) / (max_range - min_range)\nif T == qint8, out[i] -= (range(T) + 1) / 2.0\n```\nhere `range(T) = numeric_limits::max() - numeric_limits::min()`\n\n*MIN_COMBINED Mode Example*\n\nAssume the input is type float and has a possible range of [0.0, 6.0] and the\noutput type is quint8 ([0, 255]). The min_range and max_range values should be\nspecified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each\nvalue of the input by 255/6 and cast to quint8.\n\nIf the output type was qint8 ([-128, 127]), the operation will additionally\nsubtract each value by 128 prior to casting, so that the range of values aligns\nwith the range of qint8.\n\nIf the mode is \'MIN_FIRST\', then this approach is used:\n\n```\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = num_discrete_values / range\nquantized = round(input * range_scale) - round(range_min * range_scale) +\n numeric_limits::min()\nquantized = max(quantized, numeric_limits::min())\nquantized = min(quantized, numeric_limits::max())\n```\n\nThe biggest difference between this and MIN_COMBINED is that the minimum range\nis rounded first, before it\'s subtracted from the rounded value. 
With\nMIN_COMBINED, a small bias is introduced where repeated iterations of quantizing\nand dequantizing will introduce a larger and larger error.\n\n*SCALED mode Example*\n\n`SCALED` mode matches the quantization approach used in\n`QuantizeAndDequantize{V2|V3}`.\n\nIf the mode is `SCALED`, we do not use the full range of the output type,\nchoosing to elide the lowest possible value for symmetry (e.g., output range is\n-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to\n0.\n\nWe first find the range of values in our tensor. The\nrange we use is always centered on 0, so we find m such that\n```c++\n m = max(abs(input_min), abs(input_max))\n```\n\nOur input tensor range is then `[-m, m]`.\n\nNext, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.\nIf T is signed, this is\n```\n num_bits = sizeof(T) * 8\n [min_fixed, max_fixed] =\n [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]\n```\n\nOtherwise, if T is unsigned, the fixed-point range is\n```\n [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]\n```\n\nFrom this we compute our scaling factor, s:\n```c++\n s = (max_fixed - min_fixed) / (2 * m)\n```\n\nNow we can quantize the elements of our tensor:\n```c++\nresult = round(input * s)\n```\n\nOne thing to watch out for is that the operator may choose to adjust the\nrequested minimum and maximum values slightly during the quantization process,\nso you should always use the output ports as the range for further calculations.\nFor example, if the requested minimum and maximum values are close to equal,\nthey will be separated by a small epsilon value to prevent ill-formed quantized\nbuffers from being created. Otherwise, you can end up with buffers where all the\nquantized values map to the same float value, which causes problems for\noperations that have to perform further calculations on them." } op { name: "QuantizedAdd" input_arg { name: "x" type_attr: "T1" } input_arg { name: "y" type_attr: "T2" } input_arg { name: "min_x" description: "The float value that the lowest quantized `x` value represents." type: DT_FLOAT } input_arg { name: "max_x" description: "The float value that the highest quantized `x` value represents." type: DT_FLOAT } input_arg { name: "min_y" description: "The float value that the lowest quantized `y` value represents." type: DT_FLOAT } input_arg { name: "max_y" description: "The float value that the highest quantized `y` value represents." type: DT_FLOAT } output_arg { name: "z" type_attr: "Toutput" } output_arg { name: "min_z" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_z" description: "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Returns x + y element-wise, working on quantized buffers." 
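# A worked example of the symmetric (SCALED-style) quantization arithmetic
# described above, for num_bits = 8, signed input, and m = 1.0, as plain
# Python:
#
#   m = 1.0
#   min_fixed, max_fixed = -127, 127           # lowest value elided for symmetry
#   s = (max_fixed - min_fixed) / (2.0 * m)    # (127 + 127) / 2 = 127
#   quantize = lambda e: int(round(e * s))     # 0.3 -> 38
#   dequantize = lambda q: q / s               # 38  -> 38/127 ~= 0.2992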
is_commutative: true } op { name: "QuantizedAvgPool" input_arg { name: "input" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "min_output" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_output" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input." } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } summary: "Produces the average pool of the input tensor for quantized types." } op { name: "QuantizedBatchNormWithGlobalNormalization" input_arg { name: "t" description: "A 4D input Tensor." type_attr: "Tinput" } input_arg { name: "t_min" description: "The value represented by the lowest quantized input." type: DT_FLOAT } input_arg { name: "t_max" description: "The value represented by the highest quantized input." type: DT_FLOAT } input_arg { name: "m" description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "Tinput" } input_arg { name: "m_min" description: "The value represented by the lowest quantized mean." type: DT_FLOAT } input_arg { name: "m_max" description: "The value represented by the highest quantized mean." type: DT_FLOAT } input_arg { name: "v" description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "Tinput" } input_arg { name: "v_min" description: "The value represented by the lowest quantized variance." type: DT_FLOAT } input_arg { name: "v_max" description: "The value represented by the highest quantized variance." type: DT_FLOAT } input_arg { name: "beta" description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor." type_attr: "Tinput" } input_arg { name: "beta_min" description: "The value represented by the lowest quantized offset." type: DT_FLOAT } input_arg { name: "beta_max" description: "The value represented by the highest quantized offset." type: DT_FLOAT } input_arg { name: "gamma" description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor." type_attr: "Tinput" } input_arg { name: "gamma_min" description: "The value represented by the lowest quantized gamma." type: DT_FLOAT } input_arg { name: "gamma_max" description: "The value represented by the highest quantized gamma." 
type: DT_FLOAT } output_arg { name: "result" type_attr: "out_type" } output_arg { name: "result_min" type: DT_FLOAT } output_arg { name: "result_max" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "variance_epsilon" type: "float" description: "A small float number to avoid dividing by 0." } attr { name: "scale_after_normalization" type: "bool" description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." } summary: "Quantized Batch normalization." description: "This op is deprecated and will be removed in the future. Prefer\n`tf.nn.batch_normalization`." } op { name: "QuantizedBiasAdd" input_arg { name: "input" type_attr: "T1" } input_arg { name: "bias" description: "A 1D bias Tensor with size matching the last dimension of \'input\'." type_attr: "T2" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } input_arg { name: "min_bias" description: "The float value that the lowest quantized bias value represents." type: DT_FLOAT } input_arg { name: "max_bias" description: "The float value that the highest quantized bias value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_out" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_out" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Adds Tensor \'bias\' to Tensor \'input\' for Quantized types." description: "Broadcasts the values of bias on dimensions 0..N-2 of \'input\'." } op { name: "QuantizedConcat" input_arg { name: "concat_dim" description: "0-D. The dimension along which to concatenate. Must be in the\nrange [0, rank(values))." type: DT_INT32 } input_arg { name: "values" description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`." type_attr: "T" number_attr: "N" } input_arg { name: "input_mins" description: "The minimum scalar values for each of the input tensors." type: DT_FLOAT number_attr: "N" } input_arg { name: "input_maxes" description: "The maximum scalar values for each of the input tensors." type: DT_FLOAT number_attr: "N" } output_arg { name: "output" description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes." type_attr: "T" } output_arg { name: "output_min" description: "The float value that the minimum quantized output value represents." 
type: DT_FLOAT } output_arg { name: "output_max" description: "The float value that the maximum quantized output value represents." type: DT_FLOAT } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } summary: "Concatenates quantized tensors along one dimension." } op { name: "QuantizedConv2D" input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" description: "filter\'s input_depth dimension must match input\'s depth dimensions." type_attr: "Tfilter" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } input_arg { name: "min_filter" description: "The float value that the lowest quantized filter value represents." type: DT_FLOAT } input_arg { name: "max_filter" description: "The float value that the highest quantized filter value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_output" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } summary: "Computes a 2D convolution given quantized 4D input and filter tensors." description: "The inputs are quantized tensors where the lowest value represents the real\nnumber of the associated minimum, and the highest represents the maximum.\nThis means that you can only interpret the quantized output in the same way, by\ntaking the returned minimum and maximum values into account." } op { name: "QuantizedInstanceNorm" input_arg { name: "x" description: "A 4D input Tensor." type_attr: "T" } input_arg { name: "x_min" description: "The value represented by the lowest quantized input." type: DT_FLOAT } input_arg { name: "x_max" description: "The value represented by the highest quantized input." type: DT_FLOAT } output_arg { name: "y" description: "A 4D Tensor." type_attr: "T" } output_arg { name: "y_min" description: "The value represented by the lowest quantized output." type: DT_FLOAT } output_arg { name: "y_max" description: "The value represented by the highest quantized output." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "output_range_given" type: "bool" default_value { b: false } description: "If True, `given_y_min` and `given_y_min`\nand `given_y_max` are used as the output range. Otherwise,\nthe implementation computes the output range." 
} attr { name: "given_y_min" type: "float" default_value { f: 0 } description: "Output in `y_min` if `output_range_given` is True." } attr { name: "given_y_max" type: "float" default_value { f: 0 } description: "Output in `y_max` if `output_range_given` is True." } attr { name: "variance_epsilon" type: "float" default_value { f: 1e-05 } description: "A small float number to avoid dividing by 0." } attr { name: "min_separation" type: "float" default_value { f: 0.001 } description: "Minimum value of `y_max - y_min`" } summary: "Quantized Instance normalization." } op { name: "QuantizedMatMul" input_arg { name: "a" description: "Must be a two-dimensional tensor." type_attr: "T1" } input_arg { name: "b" description: "Must be a two-dimensional tensor." type_attr: "T2" } input_arg { name: "min_a" description: "The float value that the lowest quantized `a` value represents." type: DT_FLOAT } input_arg { name: "max_a" description: "The float value that the highest quantized `a` value represents." type: DT_FLOAT } input_arg { name: "min_b" description: "The float value that the lowest quantized `b` value represents." type: DT_FLOAT } input_arg { name: "max_b" description: "The float value that the highest quantized `b` value represents." type: DT_FLOAT } output_arg { name: "out" type_attr: "Toutput" } output_arg { name: "min_out" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_out" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "transpose_a" type: "bool" default_value { b: false } description: "If true, `a` is transposed before multiplication." } attr { name: "transpose_b" type: "bool" default_value { b: false } description: "If true, `b` is transposed before multiplication." } attr { name: "Tactivation" type: "type" default_value { type: DT_QUINT8 } description: "The type of output produced by activation function\nfollowing this operation." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Perform a quantized matrix multiplication of `a` by the matrix `b`." description: "The inputs must be two-dimensional matrices and the inner dimension of\n`a` (after being transposed if `transpose_a` is non-zero) must match the\nouter dimension of `b` (after being transposed if `transposed_b` is\nnon-zero)." } op { name: "QuantizedMaxPool" input_arg { name: "input" description: "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over." type_attr: "T" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "min_output" description: "The float value that the lowest quantized output value represents." 
type: DT_FLOAT } output_arg { name: "max_output" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input." } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } summary: "Produces the max pool of the input tensor for quantized types." } op { name: "QuantizedMul" input_arg { name: "x" type_attr: "T1" } input_arg { name: "y" type_attr: "T2" } input_arg { name: "min_x" description: "The float value that the lowest quantized `x` value represents." type: DT_FLOAT } input_arg { name: "max_x" description: "The float value that the highest quantized `x` value represents." type: DT_FLOAT } input_arg { name: "min_y" description: "The float value that the lowest quantized `y` value represents." type: DT_FLOAT } input_arg { name: "max_y" description: "The float value that the highest quantized `y` value represents." type: DT_FLOAT } output_arg { name: "z" type_attr: "Toutput" } output_arg { name: "min_z" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_z" description: "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Returns x * y element-wise, working on quantized buffers." is_commutative: true } op { name: "QuantizedRelu" input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "min_features" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } input_arg { name: "max_features" description: "The float value that the highest quantized value represents." type: DT_FLOAT } output_arg { name: "activations" description: "Has the same output shape as \"features\"." type_attr: "out_type" } output_arg { name: "min_activations" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } output_arg { name: "max_activations" description: "The float value that the highest quantized value represents." 
type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Computes Quantized Rectified Linear: `max(features, 0)`" } op { name: "QuantizedRelu6" input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "min_features" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } input_arg { name: "max_features" description: "The float value that the highest quantized value represents." type: DT_FLOAT } output_arg { name: "activations" description: "Has the same output shape as \"features\"." type_attr: "out_type" } output_arg { name: "min_activations" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } output_arg { name: "max_activations" description: "The float value that the highest quantized value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`" } op { name: "QuantizedReluX" input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "max_value" type: DT_FLOAT } input_arg { name: "min_features" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } input_arg { name: "max_features" description: "The float value that the highest quantized value represents." type: DT_FLOAT } output_arg { name: "activations" description: "Has the same output shape as \"features\"." type_attr: "out_type" } output_arg { name: "min_activations" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } output_arg { name: "max_activations" description: "The float value that the highest quantized value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`" } op { name: "QuantizedReshape" input_arg { name: "tensor" type_attr: "T" } input_arg { name: "shape" description: "Defines the shape of the output tensor." type_attr: "Tshape" } input_arg { name: "input_min" description: "The minimum value of the input." type: DT_FLOAT } input_arg { name: "input_max" description: "The maximum value of the input." type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_min" description: "This value is copied from input_min." type: DT_FLOAT } output_arg { name: "output_max" description: "This value is copied from input_max." type: DT_FLOAT } attr { name: "T" type: "type" } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Reshapes a quantized tensor as per the Reshape op." 
description: "```" } op { name: "QuantizedResizeBilinear" input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type_attr: "T" } output_arg { name: "out_min" type: DT_FLOAT } output_arg { name: "out_max" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QUINT8 type: DT_QINT32 type: DT_FLOAT } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } summary: "Resize quantized `images` to `size` using quantized bilinear interpolation." description: "Input images and output images must be quantized types." } op { name: "QueueClose" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } description: "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled." } summary: "Closes the given queue." description: "This operation signals that no more elements will be enqueued in the\ngiven queue. Subsequent Enqueue(Many) operations will fail.\nSubsequent Dequeue(Many) operations will continue to succeed if\nsufficient elements remain in the queue. Subsequent Dequeue(Many)\noperations that would block will fail immediately." } op { name: "QueueCloseV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } description: "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled." } summary: "Closes the given queue." description: "This operation signals that no more elements will be enqueued in the\ngiven queue. Subsequent Enqueue(Many) operations will fail.\nSubsequent Dequeue(Many) operations will continue to succeed if\nsufficient elements remain in the queue. Subsequent Dequeue(Many)\noperations that would block will fail immediately." is_stateful: true } op { name: "QueueDequeue" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Dequeues a tuple of one or more tensors from the given queue." description: "This operation has k outputs, where k is the number of components\nin the tuples stored in the given queue, and output i is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until an element\nhas been dequeued (or \'timeout_ms\' elapses, if specified)." 
} op { name: "QueueDequeueMany" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Dequeues `n` tuples of one or more tensors from the given queue." description: "If the queue is closed and there are fewer than `n` elements, then an\nOutOfRange error is returned.\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until `n` elements\nhave been dequeued (or \'timeout_ms\' elapses, if specified)." } op { name: "QueueDequeueManyV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Dequeues `n` tuples of one or more tensors from the given queue." description: "If the queue is closed and there are fewer than `n` elements, then an\nOutOfRange error is returned.\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until `n` elements\nhave been dequeued (or \'timeout_ms\' elapses, if specified)." is_stateful: true } op { name: "QueueDequeueUpTo" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." 
} summary: "Dequeues `n` tuples of one or more tensors from the given queue." description: "This operation is not supported by all queues. If a queue does not support\nDequeueUpTo, then an Unimplemented error is returned.\n\nIf the queue is closed and there are more than 0 but less than `n`\nelements remaining, then instead of returning an OutOfRange error like\nQueueDequeueMany, less than `n` elements are returned immediately. If\nthe queue is closed and there are 0 elements left in the queue, then\nan OutOfRange error is returned just like in QueueDequeueMany.\nOtherwise the behavior is identical to QueueDequeueMany:\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has k outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple." } op { name: "QueueDequeueUpToV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Dequeues `n` tuples of one or more tensors from the given queue." description: "This operation is not supported by all queues. If a queue does not support\nDequeueUpTo, then an Unimplemented error is returned.\n\nIf the queue is closed and there are more than 0 but less than `n`\nelements remaining, then instead of returning an OutOfRange error like\nQueueDequeueMany, less than `n` elements are returned immediately. If\nthe queue is closed and there are 0 elements left in the queue, then\nan OutOfRange error is returned just like in QueueDequeueMany.\nOtherwise the behavior is identical to QueueDequeueMany:\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size n in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple." is_stateful: true } op { name: "QueueDequeueV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Dequeues a tuple of one or more tensors from the given queue." 
description: "This operation has k outputs, where k is the number of components\nin the tuples stored in the given queue, and output i is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until an element\nhas been dequeued (or \'timeout_ms\' elapses, if specified)." is_stateful: true } op { name: "QueueEnqueue" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should be taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Enqueues a tuple of one or more tensors in the given queue." description: "The components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelement has been enqueued (or \'timeout_ms\' elapses, if specified)." } op { name: "QueueEnqueueMany" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should\nbe taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Enqueues zero or more tuples of one or more tensors in the given queue." description: "This operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tuple components must have the\nsame size in the 0th dimension.\n\nThe components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelements have been enqueued (or \'timeout_ms\' elapses, if specified)." } op { name: "QueueEnqueueManyV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should\nbe taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Enqueues zero or more tuples of one or more tensors in the given queue." description: "This operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tuple components must have the\nsame size in the 0th dimension.\n\nThe components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelements have been enqueued (or \'timeout_ms\' elapses, if specified)." is_stateful: true } op { name: "QueueEnqueueV2" input_arg { name: "handle" description: "The handle to a queue." 
type: DT_RESOURCE } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should be taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } summary: "Enqueues a tuple of one or more tensors in the given queue." description: "The components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelement has been enqueued (or \'timeout_ms\' elapses, if specified)." is_stateful: true } op { name: "QueueIsClosed" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } output_arg { name: "is_closed" type: DT_BOOL } summary: "Returns true if queue is closed." description: "This operation returns true if the queue is closed and false if the queue\nis open." } op { name: "QueueIsClosedV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } output_arg { name: "is_closed" type: DT_BOOL } summary: "Returns true if queue is closed." description: "This operation returns true if the queue is closed and false if the queue\nis open." is_stateful: true } op { name: "QueueSize" input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } output_arg { name: "size" description: "The number of elements in the given queue." type: DT_INT32 } summary: "Computes the number of elements in the given queue." } op { name: "QueueSizeV2" input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } output_arg { name: "size" description: "The number of elements in the given queue." type: DT_INT32 } summary: "Computes the number of elements in the given queue." is_stateful: true } op { name: "RFFT" input_arg { name: "input" description: "A float32 tensor." type: DT_FLOAT } input_arg { name: "fft_length" description: "An int32 tensor of shape [1]. The FFT length." type: DT_INT32 } output_arg { name: "output" description: "A complex64 tensor of the same rank as `input`. The inner-most\n dimension of `input` is replaced with the `fft_length / 2 + 1` unique\n frequency components of its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft\n@end_compatibility" type: DT_COMPLEX64 } summary: "Real-valued fast Fourier transform." description: "Computes the 1-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most dimension of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the\n`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,\nfollowed by the `fft_length / 2` positive-frequency terms.\n\nAlong the axis `RFFT` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros." } op { name: "RFFT2D" input_arg { name: "input" description: "A float32 tensor." type: DT_FLOAT } input_arg { name: "fft_length" description: "An int32 tensor of shape [2]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A complex64 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier transform. 
The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft2\n@end_compatibility" type: DT_COMPLEX64 } summary: "2D real-valued fast Fourier transform." description: "Computes the 2-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most 2 dimensions of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the\n`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension\nof `output`: the zero-frequency term, followed by the `fft_length / 2`\npositive-frequency terms.\n\nAlong each axis `RFFT2D` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros." } op { name: "RFFT3D" input_arg { name: "input" description: "A float32 tensor." type: DT_FLOAT } input_arg { name: "fft_length" description: "An int32 tensor of shape [3]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A complex64 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier transform. The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfftn with 3 dimensions.\n@end_compatibility" type: DT_COMPLEX64 } summary: "3D real-valued fast Fourier transform." description: "Computes the 3-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most 3 dimensions of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the\n`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension\nof `output`: the zero-frequency term, followed by the `fft_length / 2`\npositive-frequency terms.\n\nAlong each axis `RFFT3D` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros." } op { name: "RGBToHSV" input_arg { name: "images" description: "1-D or higher rank. RGB data to convert. Last dimension must be size 3." type_attr: "T" } output_arg { name: "output" description: "`images` converted to HSV." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Converts one or more images from RGB to HSV." description: "Outputs a tensor of the same shape as the `images` tensor, containing the HSV\nvalue of the pixels. The output is only well defined if the values in `images`\nare in `[0,1]`.\n\n`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and\n`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0\ncorresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue." } op { name: "RandomCrop" input_arg { name: "image" description: "3-D of shape `[height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "1-D of length 2 containing: `crop_height`, `crop_width`."
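# A worked check of the `fft_length / 2 + 1` output size noted for the RFFT
# family above -- a minimal sketch, assuming the TF 1.x tf.spectral module:
#
#   import tensorflow as tf
#   x = tf.constant([0.0, 1.0, 2.0, 3.0])   # fft_length defaults to 4
#   y = tf.spectral.rfft(x)                  # output shape [4 / 2 + 1] = [3]
#   with tf.Session() as sess:
#       print(sess.run(y))   # ==> [ 6.+0.j  -2.+2.j  -2.+0.j]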
type: DT_INT64 } output_arg { name: "output" description: "3-D of shape `[crop_height, crop_width, channels].`" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } summary: "Randomly crop `image`." description: "`size` is a 1-D int64 tensor with 2 elements representing the crop height and\nwidth. The values must be non-negative.\n\nThis Op picks a random location in `image` and crops a `height` by `width`\nrectangle from that location. The random location is picked so the cropped\narea will fit inside the original image." deprecation { version: 8 explanation: "Random crop is now pure Python" } is_stateful: true } op { name: "RandomGamma" input_arg { name: "shape" description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha." type_attr: "S" } input_arg { name: "alpha" description: "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution." type_attr: "T" } output_arg { name: "output" description: "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha." type_attr: "T" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Outputs random values from the Gamma distribution(s) described by alpha." description: "This op uses the algorithm by Marsaglia et al. to acquire samples via\ntransformation-rejection from pairs of uniform and normal random variables.\nSee http://dl.acm.org/citation.cfm?id=358414" is_stateful: true } op { name: "RandomPoisson" input_arg { name: "shape" type_attr: "S" } input_arg { name: "rate" type_attr: "dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Use RandomPoissonV2 instead." deprecation { version: 25 explanation: "Replaced by RandomPoissonV2" } is_stateful: true } op { name: "RandomPoissonV2" input_arg { name: "shape" description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in rate." type_attr: "S" } input_arg { name: "rate" description: "A tensor in which each scalar is a \"rate\" parameter describing the\nassociated Poisson distribution."
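# Shape semantics of RandomGamma above -- a minimal sketch, assuming the
# TF 1.x Python API, where tf.random_gamma wraps the op:
#
#   import tensorflow as tf
#   samples = tf.random_gamma([10], alpha=[0.5, 1.5])
#   # `shape + shape(alpha)` gives samples a shape of [10, 2]:
#   # samples[:, 0] holds draws from Gamma(0.5), samples[:, 1] from Gamma(1.5).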
type_attr: "R" } output_arg { name: "output" description: "A tensor with shape `shape + shape(rate)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`rate[i0, i1, ...iN]`." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "R" type: "type" default_value { type: DT_DOUBLE } allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "dtype" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Outputs random values from the Poisson distribution(s) described by rate." description: "This op uses two algorithms, depending on rate. If rate >= 10, then\nthe algorithm by Hormann is used to acquire samples via\ntransformation-rejection.\nSee http://www.sciencedirect.com/science/article/pii/0167668793909974.\n\nOtherwise, Knuth\'s algorithm is used to acquire samples via multiplying uniform\nrandom variables.\nSee Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer\nProgramming, Volume 2. Addison Wesley" is_stateful: true } op { name: "RandomShuffle" input_arg { name: "value" description: "The tensor to be shuffled." type_attr: "T" } output_arg { name: "output" description: "A tensor of same shape and type as `value`, shuffled along its first\ndimension." type_attr: "T" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "T" type: "type" } summary: "Randomly shuffles a tensor along its first dimension." description: " The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\n to one and only one `output[i]`. For example, a mapping that might occur for a\n 3x2 tensor is:\n\n```\n[[1, 2], [[5, 6],\n [3, 4], ==> [1, 2],\n [5, 6]] [3, 4]]\n```" is_stateful: true } op { name: "RandomShuffleQueue" output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "min_after_dequeue" type: "int" default_value { i: 0 } description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. 
This\nensures a minimum level of mixing of elements." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that randomizes the order of elements." is_stateful: true } op { name: "RandomShuffleQueueV2" output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "min_after_dequeue" type: "int" default_value { i: 0 } description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } summary: "A queue that randomizes the order of elements." is_stateful: true } op { name: "RandomStandardNormal" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } output_arg { name: "output" description: "A tensor of the specified shape filled with random normal values." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs random values from a normal distribution." 
description: "The generated values will have mean 0 and standard deviation 1." is_stateful: true } op { name: "RandomUniform" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } output_arg { name: "output" description: "A tensor of the specified shape filled with uniform random values." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs random values from a uniform distribution." description: "The generated values follow a uniform distribution in the range `[0, 1)`. The\nlower bound 0 is included in the range, while the upper bound 1 is excluded." is_stateful: true } op { name: "RandomUniformInt" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } input_arg { name: "minval" description: "0-D. Inclusive lower bound on the generated integers." type_attr: "Tout" } input_arg { name: "maxval" description: "0-D. Exclusive upper bound on the generated integers." type_attr: "Tout" } output_arg { name: "output" description: "A tensor of the specified shape filled with uniform random integers." type_attr: "Tout" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "Tout" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs random integers from a uniform distribution." description: "The generated values are uniform integers in the range `[minval, maxval)`.\nThe lower bound `minval` is included in the range, while the upper bound\n`maxval` is excluded.\n\nThe random integers are slightly biased unless `maxval - minval` is an exact\npower of two. The bias is small for values of `maxval - minval` significantly\nsmaller than the range of the output (either `2^32` or `2^64`)." is_stateful: true } op { name: "Range" input_arg { name: "start" description: "0-D (scalar). First entry in the sequence." type_attr: "Tidx" } input_arg { name: "limit" description: "0-D (scalar). Upper limit of sequence, exclusive." type_attr: "Tidx" } input_arg { name: "delta" description: "0-D (scalar). Optional. Default is 1. Number that increments `start`." type_attr: "Tidx" } output_arg { name: "output" description: "1-D." type_attr: "Tidx" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Creates a sequence of numbers." 
description: "This operation creates a sequence of numbers that begins at `start` and\nextends by increments of `delta` up to but not including `limit`.\n\nFor example:\n\n```\n# \'start\' is 3\n# \'limit\' is 18\n# \'delta\' is 3\ntf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]\n```" } op { name: "RangeDataset" input_arg { name: "start" description: "corresponds to start in python\'s xrange()." type: DT_INT64 } input_arg { name: "stop" description: "corresponds to stop in python\'s xrange()." type: DT_INT64 } input_arg { name: "step" description: "corresponds to step in python\'s xrange()." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset with a range of values. Corresponds to python\'s xrange." is_stateful: true } op { name: "Rank" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_INT32 } attr { name: "T" type: "type" } summary: "Returns the rank of a tensor." description: "This operation returns an integer representing the rank of `input`.\n\nFor example:\n\n```\n# \'t\' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\n# shape of tensor \'t\' is [2, 2, 3]\nrank(t) ==> 3\n```\n\n**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank\nof a tensor is the number of indices required to uniquely select each element\nof the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"" } op { name: "ReadFile" input_arg { name: "filename" type: DT_STRING } output_arg { name: "contents" type: DT_STRING } summary: "Reads and outputs the entire contents of the input filename." } op { name: "ReadVariableOp" input_arg { name: "resource" description: "handle to the resource in which to store the variable." type: DT_RESOURCE } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" description: "the dtype of the value." } summary: "Reads the value of a variable." description: "The tensor returned by this operation is immutable.\n\nThe value returned by this operation is guaranteed to be influenced by all the\nwrites on which this operation depends directly or indirectly, and to not be\ninfluenced by any of the writes which depend directly or indirectly on this\noperation." is_stateful: true } op { name: "ReaderNumRecordsProduced" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } output_arg { name: "records_produced" type: DT_INT64 } summary: "Returns the number of records this Reader has produced." description: "This is the same as the number of ReaderRead executions that have\nsucceeded." } op { name: "ReaderNumRecordsProducedV2" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } output_arg { name: "records_produced" type: DT_INT64 } summary: "Returns the number of records this Reader has produced." description: "This is the same as the number of ReaderRead executions that have\nsucceeded." is_stateful: true } op { name: "ReaderNumWorkUnitsCompleted" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } output_arg { name: "units_completed" type: DT_INT64 } summary: "Returns the number of work units this Reader has finished processing." } op { name: "ReaderNumWorkUnitsCompletedV2" input_arg { name: "reader_handle" description: "Handle to a Reader." 
type: DT_RESOURCE } output_arg { name: "units_completed" type: DT_INT64 } summary: "Returns the number of work units this Reader has finished processing." is_stateful: true } op { name: "ReaderRead" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } input_arg { name: "queue_handle" description: "Handle to a Queue, with string work items." type: DT_STRING is_ref: true } output_arg { name: "key" description: "A scalar." type: DT_STRING } output_arg { name: "value" description: "A scalar." type: DT_STRING } summary: "Returns the next record (key, value pair) produced by a Reader." description: "Will dequeue from the input queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has finished\nwith the previous file)." } op { name: "ReaderReadUpTo" input_arg { name: "reader_handle" description: "Handle to a `Reader`." type: DT_STRING is_ref: true } input_arg { name: "queue_handle" description: "Handle to a `Queue`, with string work items." type: DT_STRING is_ref: true } input_arg { name: "num_records" description: "number of records to read from `Reader`." type: DT_INT64 } output_arg { name: "keys" description: "A 1-D tensor." type: DT_STRING } output_arg { name: "values" description: "A 1-D tensor." type: DT_STRING } summary: "Returns up to `num_records` (key, value) pairs produced by a Reader." description: "Will dequeue from the input queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has finished\nwith the previous file).\nIt may return fewer than `num_records` even before the last batch." } op { name: "ReaderReadUpToV2" input_arg { name: "reader_handle" description: "Handle to a `Reader`." type: DT_RESOURCE } input_arg { name: "queue_handle" description: "Handle to a `Queue`, with string work items." type: DT_RESOURCE } input_arg { name: "num_records" description: "number of records to read from `Reader`." type: DT_INT64 } output_arg { name: "keys" description: "A 1-D tensor." type: DT_STRING } output_arg { name: "values" description: "A 1-D tensor." type: DT_STRING } summary: "Returns up to `num_records` (key, value) pairs produced by a Reader." description: "Will dequeue from the input queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has finished\nwith the previous file).\nIt may return fewer than `num_records` even before the last batch." is_stateful: true } op { name: "ReaderReadV2" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } input_arg { name: "queue_handle" description: "Handle to a Queue, with string work items." type: DT_RESOURCE } output_arg { name: "key" description: "A scalar." type: DT_STRING } output_arg { name: "value" description: "A scalar." type: DT_STRING } summary: "Returns the next record (key, value pair) produced by a Reader." description: "Will dequeue from the input queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has finished\nwith the previous file)." is_stateful: true } op { name: "ReaderReset" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } summary: "Restore a Reader to its initial clean state." } op { name: "ReaderResetV2" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } summary: "Restore a Reader to its initial clean state."
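# A minimal sketch of the ReaderRead pattern above, assuming the TF 1.x input
# pipeline (tf.WholeFileReader wraps ReaderRead(V2)); the filenames are
# invented placeholders:
#
#   import tensorflow as tf
#   filename_queue = tf.train.string_input_producer(["a.txt", "b.txt"])
#   reader = tf.WholeFileReader()
#   key, value = reader.read(filename_queue)   # dequeues a work item when the
#                                              # previous file is exhausted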
is_stateful: true } op { name: "ReaderRestoreState" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } input_arg { name: "state" description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle." type: DT_STRING } summary: "Restore a reader to a previously saved state." description: "Not all Readers support being restored, so this can produce an\nUnimplemented error." } op { name: "ReaderRestoreStateV2" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } input_arg { name: "state" description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle." type: DT_STRING } summary: "Restore a reader to a previously saved state." description: "Not all Readers support being restored, so this can produce an\nUnimplemented error." is_stateful: true } op { name: "ReaderSerializeState" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } output_arg { name: "state" type: DT_STRING } summary: "Produce a string tensor that encodes the state of a Reader." description: "Not all Readers support being serialized, so this can produce an\nUnimplemented error." } op { name: "ReaderSerializeStateV2" input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } output_arg { name: "state" type: DT_STRING } summary: "Produce a string tensor that encodes the state of a Reader." description: "Not all Readers support being serialized, so this can produce an\nUnimplemented error." is_stateful: true } op { name: "Real" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns the real part of a complex number." description: "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the real part of each element in `input`. All elements in\n`input` must be complex numbers of the form \\\\(a + bj\\\\), where *a* is the real\n part returned by this operation and *b* is the imaginary part.\n\nFor example:\n\n```\n# tensor \'input\' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.real(input) ==> [-2.25, 3.25]\n```" } op { name: "RealDiv" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns x / y element-wise for real types." description: "If `x` and `y` are reals, this will return the floating-point division.\n\n*NOTE*: `Div` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "Reciprocal" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the reciprocal of x element-wise." description: "I.e., \\\\(y = 1 / x\\\\)." 
} op { name: "ReciprocalGrad" input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the gradient for the inverse of `x` wrt its input." description: "Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`\nis the corresponding input gradient." } op { name: "RecordInput" output_arg { name: "records" description: "A tensor of shape [batch_size]." type: DT_STRING } attr { name: "file_pattern" type: "string" description: "Glob pattern for the data files." } attr { name: "file_random_seed" type: "int" default_value { i: 301 } description: "Random seeds used to produce randomized records." } attr { name: "file_shuffle_shift_ratio" type: "float" default_value { f: 0 } description: "Shifts the list of files after the list is randomly\nshuffled." } attr { name: "file_buffer_size" type: "int" default_value { i: 10000 } description: "The randomization shuffling buffer." } attr { name: "file_parallelism" type: "int" default_value { i: 16 } description: "How many sstables are opened and concurrently iterated over." } attr { name: "batch_size" type: "int" default_value { i: 32 } description: "The batch size." } summary: "Emits randomized records." is_stateful: true } op { name: "ReduceJoin" input_arg { name: "inputs" description: "The input to be joined. All reduced indices must have non-zero size." type: DT_STRING } input_arg { name: "reduction_indices" description: "The dimensions to reduce over. Dimensions are reduced in the\norder specified. Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported." type: DT_INT32 } output_arg { name: "output" description: "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`." type: DT_STRING } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If `True`, retain reduced dimensions with length `1`." } attr { name: "separator" type: "string" default_value { s: "" } description: "The separator to use when joining." } summary: "Joins a string Tensor across the given dimensions." description: "Computes the string join across dimensions in the given string Tensor of shape\n`[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input\nstrings with the given separator (default: empty string). Negative indices are\ncounted backwards from the end, with `-1` being equivalent to `n - 1`.\n\nFor example:\n\n```python\n# tensor `a` is [[\"a\", \"b\"], [\"c\", \"d\"]]\ntf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\ntf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\ntf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\ntf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\ntf.reduce_join(a, 0, keep_dims=True) ==> [[\"ac\", \"bd\"]]\ntf.reduce_join(a, 1, keep_dims=True) ==> [[\"ab\"], [\"cd\"]]\ntf.reduce_join(a, 0, separator=\".\") ==> [\"a.c\", \"b.d\"]\ntf.reduce_join(a, [0, 1]) ==> [\"acbd\"]\ntf.reduce_join(a, [1, 0]) ==> [\"abcd\"]\ntf.reduce_join(a, []) ==> [\"abcd\"]\n```" } op { name: "RefEnter" input_arg { name: "data" description: "The tensor to be made available to the child frame." type_attr: "T" is_ref: true } output_arg { name: "output" description: "The same tensor as `data`." 
type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "frame_name" type: "string" description: "The name of the child frame." } attr { name: "is_constant" type: "bool" default_value { b: false } description: "If true, the output is constant within the child frame." } attr { name: "parallel_iterations" type: "int" default_value { i: 10 } description: "The number of iterations allowed to run in parallel." } summary: "Creates or finds a child frame, and makes `data` available to the child frame." description: "The unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `output` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations` iterations\nare run in parallel in the child frame." } op { name: "RefExit" input_arg { name: "data" description: "The tensor to be made available to the parent frame." type_attr: "T" is_ref: true } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } summary: "Exits the current frame to its parent frame." description: "Exit makes its input `data` available to the parent frame." } op { name: "RefIdentity" input_arg { name: "input" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } summary: "Return the same ref tensor as the input ref tensor." allows_uninitialized_input: true } op { name: "RefMerge" input_arg { name: "inputs" description: "The input tensors, exactly one of which will become available." type_attr: "T" number_attr: "N" is_ref: true } output_arg { name: "output" description: "Will be set to the available input tensor." type_attr: "T" is_ref: true } output_arg { name: "value_index" description: "The index of the chosen input tensor in `inputs`." type: DT_INT32 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } summary: "Forwards the value of an available tensor from `inputs` to `output`." description: "`Merge` waits for at least one of the tensors in `inputs` to become available.\nIt is usually combined with `Switch` to implement branching.\n\n`Merge` forwards the first tensor for become available to `output`, and sets\n`value_index` to its index in `inputs`." } op { name: "RefNextIteration" input_arg { name: "data" description: "The tensor to be made available to the next iteration." type_attr: "T" is_ref: true } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } summary: "Makes its input available to the next iteration." } op { name: "RefSelect" input_arg { name: "index" description: "A scalar that determines the input that gets selected." type: DT_INT32 } input_arg { name: "inputs" description: "A list of ref tensors, one of which will be forwarded to `output`." type_attr: "T" number_attr: "N" is_ref: true } output_arg { name: "output" description: "The forwarded tensor." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } summary: "Forwards the `index`th element of `inputs` to `output`." } op { name: "RefSwitch" input_arg { name: "data" description: "The ref tensor to be forwarded to the appropriate output." type_attr: "T" is_ref: true } input_arg { name: "pred" description: "A scalar that specifies which output port will receive data." 
type: DT_BOOL } output_arg { name: "output_false" description: "If `pred` is false, data will be forwarded to this output." type_attr: "T" is_ref: true } output_arg { name: "output_true" description: "If `pred` is true, data will be forwarded to this output." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } summary: "Forwards the ref tensor `data` to the output port determined by `pred`." description: "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `Switch` and `Merge`." allows_uninitialized_input: true } op { name: "Relu" input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes rectified linear: `max(features, 0)`." } op { name: "Relu6" input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes rectified linear 6: `min(max(features, 0), 6)`." } op { name: "Relu6Grad" input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Relu6 operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding Relu6 operation, or\nits output; using either one produces the same result." type_attr: "T" } output_arg { name: "backprops" description: "The gradients:\n`gradients * (features > 0) * (features < 6)`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes rectified linear 6 gradients for a Relu6 operation." } op { name: "ReluGrad" input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Relu operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently)." type_attr: "T" } output_arg { name: "backprops" description: "`gradients * (features > 0)`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes rectified linear gradients for a Relu operation." } op { name: "RemoteCall" input_arg { name: "target" description: "A fully specified device name where we want to run the function." type: DT_STRING } input_arg { name: "args" description: "A list of arguments for the function." type_list_attr: "Tin" } output_arg { name: "output" description: "A list of return values." type_list_attr: "Tout" } attr { name: "Tin" type: "list(type)" description: "The type list for the arguments." has_minimum: true minimum: 1 } attr { name: "Tout" type: "list(type)" description: "The type list for the return values." 
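# The Relu6Grad mask above (`gradients * (features > 0) * (features < 6)`)
# as executable code -- a minimal sketch in plain Python:
#
#   def relu6_grad(gradients, features):
#       return [g * (1.0 if 0.0 < f < 6.0 else 0.0)
#               for g, f in zip(gradients, features)]
#
#   relu6_grad([1.0, 1.0, 1.0], [-1.0, 3.0, 7.0])   # ==> [0.0, 1.0, 0.0]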
has_minimum: true minimum: 1 } attr { name: "f" type: "func" description: "The function to run remotely." } summary: "Runs function `f` on a remote device indicated by `target`." } op { name: "RemoteFusedGraphExecute" input_arg { name: "inputs" description: "Arbitrary number of tensors with arbitrary data types" type_list_attr: "Tinputs" } output_arg { name: "outputs" description: "Arbitrary number of tensors with arbitrary data types" type_list_attr: "Toutputs" } attr { name: "Tinputs" type: "list(type)" has_minimum: true } attr { name: "Toutputs" type: "list(type)" has_minimum: true } attr { name: "serialized_remote_fused_graph_execute_info" type: "string" description: "Serialized protocol buffer\nof RemoteFusedGraphExecuteInfo which contains graph specifications." } summary: "Execute a subgraph on a remote processor." description: "The graph specifications (such as the graph itself, input tensors and output names)\nare stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo\nas serialized_remote_fused_graph_execute_info.\nThe specifications will be passed to a dedicated registered\nremote fused graph executor. The executor will send the graph specifications\nto a remote processor and execute that graph. The execution results\nwill be passed to consumer nodes as outputs of this node." } op { name: "RepeatDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "count" description: "A scalar representing the number of times that `input_dataset` should\nbe repeated. A value of `-1` indicates that it should be repeated infinitely." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that emits the outputs of `input_dataset` `count` times." } op { name: "RequantizationRange" input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" description: "The float value that the minimum quantized input value represents." type: DT_FLOAT } input_arg { name: "input_max" description: "The float value that the maximum quantized input value represents." type: DT_FLOAT } output_arg { name: "output_min" description: "The computed min output." type: DT_FLOAT } output_arg { name: "output_max" description: "The computed max output." type: DT_FLOAT } attr { name: "Tinput" type: "type" description: "The type of the input." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Given a quantized tensor described by (input, input_min, input_max), outputs a" description: "range that covers the actual values present in that tensor. This op is\ntypically used to produce the requested_output_min and requested_output_max for\nRequantize." } op { name: "Requantize" input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" description: "The float value that the minimum quantized input value represents." type: DT_FLOAT } input_arg { name: "input_max" description: "The float value that the maximum quantized input value represents." type: DT_FLOAT } input_arg { name: "requested_output_min" description: "The float value that the minimum quantized output value represents." type: DT_FLOAT } input_arg { name: "requested_output_max" description: "The float value that the maximum quantized output value represents."
type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" description: "The requested_output_min value is copied into this output." type: DT_FLOAT } output_arg { name: "output_max" description: "The requested_output_max value is copied into this output." type: DT_FLOAT } attr { name: "Tinput" type: "type" description: "The type of the input." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" description: "The type of the output. Should be a lower bit depth than Tinput." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the" description: "output range specified with \'requested_output_min\' and \'requested_output_max\'.\n\n[input_min, input_max] are scalar floats that specify the range for the float\ninterpretation of the \'input\' data. For example, if input_min is -1.0f and\ninput_max is 1.0f, and we are dealing with quint16 quantized data, then a 0\nvalue in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f." } op { name: "Reshape" input_arg { name: "tensor" type_attr: "T" } input_arg { name: "shape" description: "Defines the shape of the output tensor." type_attr: "Tshape" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Reshapes a tensor." description: "Given `tensor`, this operation returns a tensor that has the same values\nas `tensor` with shape `shape`.\n\nIf one component of `shape` is the special value -1, the size of that dimension\nis computed so that the total size remains constant. In particular, a `shape`\nof `[-1]` flattens into 1-D. At most one component of `shape` can be -1.\n\nIf `shape` is 1-D or higher, then the operation returns a tensor with shape\n`shape` filled with the values of `tensor`. In this case, the number of elements\nimplied by `shape` must be the same as the number of elements in `tensor`.\n\nFor example:\n\n```\n# tensor \'t\' is [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# tensor \'t\' has shape [9]\nreshape(t, [3, 3]) ==> [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n\n# tensor \'t\' is [[[1, 1], [2, 2]],\n# [[3, 3], [4, 4]]]\n# tensor \'t\' has shape [2, 2, 2]\nreshape(t, [2, 4]) ==> [[1, 1, 2, 2],\n [3, 3, 4, 4]]\n\n# tensor \'t\' is [[[1, 1, 1],\n# [2, 2, 2]],\n# [[3, 3, 3],\n# [4, 4, 4]],\n# [[5, 5, 5],\n# [6, 6, 6]]]\n# tensor \'t\' has shape [3, 2, 3]\n# pass \'[-1]\' to flatten \'t\'\nreshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]\n\n# -1 can also be used to infer the shape\n\n# -1 is inferred to be 9:\nreshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\n# -1 is inferred to be 2:\nreshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\n# -1 is inferred to be 3:\nreshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n [[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6]]]\n\n# tensor \'t\' is [7]\n# shape `[]` reshapes to a scalar\nreshape(t, []) ==> 7\n```" } op { name: "ResizeArea" input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. 
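# The float interpretation described for Requantize above, as arithmetic --
# a minimal sketch in plain Python for quint16 data over [-1.0, 1.0]:
#
#   input_min, input_max = -1.0, 1.0
#   def to_float(q):   # q is a quint16 code in 0..65535
#       return input_min + (q / 65535.0) * (input_max - input_min)
#   to_float(0)        # ==> -1.0, and to_float(65535) ==> 1.0, as described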
The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } summary: "Resize `images` to `size` using area interpolation." description: "Input images can be of different types but output images are always float.\n\nEach output pixel is computed by first transforming the pixel\'s footprint into\nthe input tensor and then averaging the pixels that intersect the footprint. An\ninput pixel\'s contribution to the average is weighted by the fraction of its\narea that intersects the footprint. This is the same as OpenCV\'s INTER_AREA." } op { name: "ResizeBicubic" input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } summary: "Resize `images` to `size` using bicubic interpolation." description: "Input images can be of different types but output images are always float." } op { name: "ResizeBicubicGrad" input_arg { name: "grads" description: "4-D with shape `[batch, height, width, channels]`." type: DT_FLOAT } input_arg { name: "original_image" description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized." type_attr: "T" } output_arg { name: "output" description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension." } summary: "Computes the gradient of bicubic interpolation." } op { name: "ResizeBilinear" input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." 
type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } summary: "Resize `images` to `size` using bilinear interpolation." description: "Input images can be of different types but output images are always float." } op { name: "ResizeBilinearGrad" input_arg { name: "grads" description: "4-D with shape `[batch, height, width, channels]`." type: DT_FLOAT } input_arg { name: "original_image" description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized." type_attr: "T" } output_arg { name: "output" description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension." } summary: "Computes the gradient of bilinear interpolation." } op { name: "ResizeNearestNeighbor" input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } summary: "Resize `images` to `size` using nearest neighbor interpolation." } op { name: "ResizeNearestNeighborGrad" input_arg { name: "grads" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size." type: DT_INT32 } output_arg { name: "output" description: "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT32 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension." 
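# The two rescale factors selected by `align_corners` in the Resize* ops
# above, as arithmetic -- a minimal sketch in plain Python for a height-4
# image resized to height 8:
#
#   height, new_height = 4, 8
#   scale_aligned = (new_height - 1) / (height - 1)   # 7/3: the 4 corners of
#                                                     # input and output coincide
#   scale_default = new_height / height               # 2.0 otherwise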
} summary: "Computes the gradient of nearest neighbor interpolation." } op { name: "ResourceApplyAdadelta" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum_update" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Update \'*var\' according to the adadelta scheme." description: "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;" is_stateful: true } op { name: "ResourceApplyAdagrad" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the adagrad scheme." description: "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))" is_stateful: true } op { name: "ResourceApplyAdagradDA" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." 
type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Update \'*var\' according to the proximal adagrad scheme." is_stateful: true } op { name: "ResourceApplyAdam" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "m" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "v" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "beta1_power" description: "Must be a scalar." type_attr: "T" } input_arg { name: "beta2_power" description: "Must be a scalar." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "beta1" description: "Momentum factor. Must be a scalar." type_attr: "T" } input_arg { name: "beta2" description: "Momentum factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, uses the nesterov update." } summary: "Update \'*var\' according to the Adam algorithm." description: "lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)\nm_t <- beta1 * m_{t-1} + (1 - beta1) * g_t\nv_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t\nvariable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)" is_stateful: true } op { name: "ResourceApplyCenteredRMSProp" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mg" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." 
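# [Editor's note] A NumPy sketch of the Adam update documented above. beta1_power
# and beta2_power correspond to beta1**t and beta2**t at step t; the defaults are
# illustrative and this is not the kernel itself:
#
#   import numpy as np
#   def adam_step(var, m, v, grad, t, lr=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-8):
#       m[:] = beta1 * m + (1 - beta1) * grad
#       v[:] = beta2 * v + (1 - beta2) * grad ** 2
#       lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
#       var[:] -= lr_t * m / (np.sqrt(v) + epsilon)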
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the centered RMSProp algorithm." description: "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom" is_stateful: true } op { name: "ResourceApplyFtrl" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "linear" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regulariation. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regulariation. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the Ftrl-proximal scheme." description: "accum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new" is_stateful: true } op { name: "ResourceApplyFtrlV2" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "linear" description: "Should be from a Variable()." 
type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2_shrinkage" description: "L2 shrinkage regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the Ftrl-proximal scheme." description: "grad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage -\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new" is_stateful: true } op { name: "ResourceApplyGradientDescent" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "delta" description: "The change." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it." is_stateful: true } op { name: "ResourceApplyMomentum" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention."
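# [Editor's note] A NumPy sketch of the Ftrl-proximal update with L2 shrinkage,
# following the ResourceApplyFtrlV2 pseudocode above (same sign convention as the
# dense Ftrl op); illustrative, not the kernel:
#
#   import numpy as np
#   def ftrl_v2_step(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power):
#       g = grad + 2 * l2_shrinkage * var
#       accum_new = accum + g * g
#       linear += g - (accum_new ** -lr_power - accum ** -lr_power) / lr * var
#       quadratic = 1.0 / (accum_new ** lr_power * lr) + 2 * l2
#       var[:] = np.where(np.abs(linear) > l1,
#                         (np.sign(linear) * l1 - linear) / quadratic, 0.0)
#       accum[:] = accum_new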
} attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." } summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you" description: "want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum" is_stateful: true } op { name: "ResourceApplyProximalAdagrad" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate." description: "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}" is_stateful: true } op { name: "ResourceApplyProximalGradientDescent" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "delta" description: "The change." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate." description: "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}" is_stateful: true } op { name: "ResourceApplyRMSProp" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." 
type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the RMSProp algorithm." description: "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom" is_stateful: true } op { name: "ResourceCountUpTo" input_arg { name: "resource" description: "Should be from a scalar `Variable` node." type: DT_RESOURCE } output_arg { name: "output" description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct." type_attr: "T" } attr { name: "limit" type: "int" description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error." } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Increments variable pointed to by \'resource\' until it reaches \'limit\'." is_stateful: true } op { name: "ResourceGather" input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "dtype" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "dtype" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Gather slices from the variable pointed to by `resource` according to `indices`." description: "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `indices.shape + params.shape[1:]` where:\n\n```python\n # Scalar indices\n output[:, ..., :] = params[indices, :, ... :]\n\n # Vector indices\n output[i, :, ..., :] = params[indices[i], :, ... :]\n\n # Higher rank indices\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\n```" is_stateful: true } op { name: "ResourceScatterAdd" input_arg { name: "resource" description: "Should be from a `Variable` node." type: DT_RESOURCE } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to add to `ref`." 
type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Adds sparse updates to the variable referenced by `resource`." description: "This operation computes\n\n # Scalar indices\n ref[indices, ...] += updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] += updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`.\n\n
\n\n
" is_stateful: true } op { name: "ResourceScatterUpdate" input_arg { name: "resource" description: "Should be from a `Variable` node." type: DT_RESOURCE } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to add to `ref`." type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Assigns sparse updates to the variable referenced by `resource`." description: "This operation computes\n\n # Scalar indices\n ref[indices, ...] = updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] = updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]" is_stateful: true } op { name: "ResourceSparseApplyAdadelta" input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum_update" description: ": Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "var: Should be from a Variable()." is_stateful: true } op { name: "ResourceSparseApplyAdagrad" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." 
type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme." description: "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))" is_stateful: true } op { name: "ResourceSparseApplyAdagradDA" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme." is_stateful: true } op { name: "ResourceSparseApplyCenteredRMSProp" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mg" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." 
type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the centered RMSProp algorithm." description: "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom" is_stateful: true } op { name: "ResourceSparseApplyFtrl" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "linear" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme." 
description: "That is for rows we have grad for, we update var, accum and linear as follows:\naccum_new = accum + grad * grad\nlinear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new" is_stateful: true } op { name: "ResourceSparseApplyFtrlV2" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "linear" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 shrinkage regulariation. Must be a scalar." type_attr: "T" } input_arg { name: "l2_shrinkage" type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme." description: "That is for rows we have grad for, we update var, accum and linear as follows:\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage +\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new" is_stateful: true } op { name: "ResourceSparseApplyMomentum" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." } summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme." description: "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum + grad\nvar -= lr * accum" is_stateful: true } op { name: "ResourceSparseApplyProximalAdagrad" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm." description: "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nprox_v = var\nprox_v -= lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}" is_stateful: true } op { name: "ResourceSparseApplyProximalGradientDescent" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." 
type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate." description: "That is for rows we have grad for, we update var as follows:\nprox_v = var - alpha * grad\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}" is_stateful: true } op { name: "ResourceSparseApplyRMSProp" input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the RMSProp algorithm." 
description: "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom" is_stateful: true } op { name: "ResourceStridedSliceAssign" input_arg { name: "ref" type: DT_RESOURCE } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "value" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } summary: "Assign `value` to the sliced l-value reference of `ref`." description: "The values of `value` are assigned to the positions in the variable\n`ref` that are selected by the slice parameters. The slice parameters\n`begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`\'s\nshape must be exactly the shape produced by the slice of `ref`." is_stateful: true } op { name: "Restore" input_arg { name: "file_pattern" description: "Must have a single element. The pattern of the files from\nwhich we read the tensor." type: DT_STRING } input_arg { name: "tensor_name" description: "Must have a single element. The name of the tensor to be\nrestored." type: DT_STRING } output_arg { name: "tensor" description: "The restored tensor." type_attr: "dt" } attr { name: "dt" type: "type" description: "The type of the tensor to be restored." } attr { name: "preferred_shard" type: "int" default_value { i: -1 } description: "Index of file to open first if multiple files match\n`file_pattern`." } summary: "Restores a tensor from checkpoint files." description: "Reads a tensor stored in one or several files. If there are several files (for\ninstance because a tensor was saved as slices), `file_pattern` may contain\nwildcard symbols (`*` and `?`) in the filename portion only, not in the\ndirectory portion.\n\nIf a `file_pattern` matches several files, `preferred_shard` can be used to hint\nin which file the requested tensor is likely to be found. This op will first\nopen the file at index `preferred_shard` in the list of matching files and try\nto restore tensors from that file. Only if some tensors or tensor slices are\nnot found in that first file, then the Op opens all the files. Setting\n`preferred_shard` to match the value passed as the `shard` input\nof a matching `Save` Op may speed up Restore. This attribute only affects\nperformance, not correctness. The default value -1 means files are processed in\norder.\n\nSee also `RestoreSlice`." is_stateful: true } op { name: "RestoreSlice" input_arg { name: "file_pattern" description: "Must have a single element. The pattern of the files from\nwhich we read the tensor." type: DT_STRING } input_arg { name: "tensor_name" description: "Must have a single element. 
The name of the tensor to be\nrestored." type: DT_STRING } input_arg { name: "shape_and_slice" description: "Scalar. The shapes and slice specifications to use when\nrestoring a tensor." type: DT_STRING } output_arg { name: "tensor" description: "The restored tensor." type_attr: "dt" } attr { name: "dt" type: "type" description: "The type of the tensor to be restored." } attr { name: "preferred_shard" type: "int" default_value { i: -1 } description: "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`." } summary: "Restores a tensor from checkpoint files." description: "This is like `Restore` except that the restored tensor can be listed as filling\nonly a slice of a larger tensor. `shape_and_slice` specifies the shape of the\nlarger tensor and the slice that the restored tensor covers.\n\nThe `shape_and_slice` input has the same format as the\nelements of the `shapes_and_slices` input of the `SaveSlices` op." is_stateful: true } op { name: "RestoreV2" input_arg { name: "prefix" description: "Must have a single element. The prefix of a V2 checkpoint." type: DT_STRING } input_arg { name: "tensor_names" description: "shape {N}. The names of the tensors to be restored." type: DT_STRING } input_arg { name: "shape_and_slices" description: "shape {N}. The slice specs of the tensors to be restored.\nEmpty strings indicate that they are non-partitioned tensors." type: DT_STRING } output_arg { name: "tensors" description: "shape {N}. The restored tensors, whose shapes are read from the\ncheckpoint directly." type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" description: "shape {N}. The list of expected dtype for the tensors. Must match\nthose stored in the checkpoint." has_minimum: true minimum: 1 } summary: "Restores tensors from a V2 checkpoint." description: "For backward compatibility with the V1 format, this Op currently allows\nrestoring from a V1 checkpoint as well:\n - This Op first attempts to find the V2 index file pointed to by \"prefix\", and\n if found, proceeds to read it as a V2 checkpoint;\n - Otherwise the V1 read path is invoked.\nRelying on this behavior is not recommended, as the ability to fall back to read\nV1 might be deprecated and eventually removed.\n\nBy default, restores the named tensors in full. If the caller wishes to restore\nspecific slices of stored tensors, \"shape_and_slices\" should be non-empty\nstrings and correspondingly well-formed.\n\nCallers must ensure all the named tensors are indeed stored in the checkpoint." is_stateful: true } op { name: "Reverse" input_arg { name: "tensor" description: "Up to 8-D." type_attr: "T" } input_arg { name: "dims" description: "1-D. The dimensions to reverse." type: DT_BOOL } output_arg { name: "output" description: "The same shape as `tensor`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_BOOL type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING } } } summary: "Reverses specific dimensions of a tensor." description: "Given a `tensor`, and a `bool` tensor `dims` representing the dimensions\nof `tensor`, this operation reverses each dimension i of `tensor` where\n`dims[i]` is `True`.\n\n`tensor` can have up to 8 dimensions. The number of dimensions\nof `tensor` must equal the number of elements in `dims`. 
In other words:\n\n`rank(tensor) = size(dims)`\n\nFor example:\n\n```\n# tensor \'t\' is [[[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]]]\n# tensor \'t\' shape is [1, 2, 3, 4]\n\n# \'dims\' is [False, False, False, True]\nreverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n# \'dims\' is [False, True, False, False]\nreverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]],\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n# \'dims\' is [False, False, True, False]\nreverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]],\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n```" } op { name: "ReverseSequence" input_arg { name: "input" description: "The input to reverse." type_attr: "T" } input_arg { name: "seq_lengths" description: "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) <= input.dims(seq_dim)`" type_attr: "Tlen" } output_arg { name: "output" description: "The partially reversed input. It has the same shape as `input`." type_attr: "T" } attr { name: "seq_dim" type: "int" description: "The dimension which is partially reversed." } attr { name: "batch_dim" type: "int" default_value { i: 0 } description: "The dimension along which reversal is performed." } attr { name: "T" type: "type" } attr { name: "Tlen" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Reverses variable length slices." description: "This op first slices `input` along the dimension `batch_dim`, and for each\nslice `i`, reverses the first `seq_lengths[i]` elements along\nthe dimension `seq_dim`.\n\nThe elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,\nand `seq_lengths` must be a vector of length `input.dims[batch_dim]`.\n\nThe output slice `i` along dimension `batch_dim` is then given by input\nslice `i`, with the first `seq_lengths[i]` slices along dimension\n`seq_dim` reversed.\n\nFor example:\n\n```\n# Given this:\nbatch_dim = 0\nseq_dim = 1\ninput.dims = (4, 8, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0, 0:7, :, ...] = input[0, 6::-1, :, ...]\noutput[1, 0:2, :, ...] = input[1, 1::-1, :, ...]\noutput[2, 0:3, :, ...] = input[2, 2::-1, :, ...]\noutput[3, 0:5, :, ...] = input[3, 4::-1, :, ...]\n\n# while entries past seq_lengths are copied through:\noutput[0, 7:, :, ...] = input[0, 7:, :, ...]\noutput[1, 2:, :, ...] = input[1, 2:, :, ...]\noutput[2, 3:, :, ...] = input[2, 3:, :, ...]\noutput[3, 5:, :, ...] = input[3, 5:, :, ...]\n```\n\nIn contrast, if:\n\n```\n# Given this:\nbatch_dim = 2\nseq_dim = 0\ninput.dims = (8, ?, 4, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0:7, :, 0, :, ...] = input[6::-1, :, 0, :, ...]\noutput[0:2, :, 1, :, ...] = input[1::-1, :, 1, :, ...]\noutput[0:3, :, 2, :, ...] = input[2::-1, :, 2, :, ...]\noutput[0:5, :, 3, :, ...] = input[4::-1, :, 3, :, ...]\n\n# while entries past seq_lengths are copied through:\noutput[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]\noutput[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]\noutput[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]\noutput[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]\n```" } op { name: "ReverseV2" input_arg { name: "tensor" description: "Up to 8-D." 
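# [Editor's note] A NumPy sketch of the ReverseSequence semantics above: for each
# batch entry i, the first seq_lengths[i] elements along seq_dim are reversed and
# the remainder is copied through; illustrative, not the kernel:
#
#   import numpy as np
#   def reverse_sequence(x, seq_lengths, seq_dim=1, batch_dim=0):
#       out = x.copy()
#       for i, n in enumerate(seq_lengths):
#           if n <= 1:
#               continue  # nothing to reverse
#           idx = [slice(None)] * x.ndim
#           idx[batch_dim] = i
#           fwd = list(idx); fwd[seq_dim] = slice(0, n)
#           rev = list(idx); rev[seq_dim] = slice(n - 1, None, -1)
#           out[tuple(fwd)] = x[tuple(rev)]
#       return out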
type_attr: "T" } input_arg { name: "axis" description: "1-D. The indices of the dimensions to reverse. Must be in the range\n`[-rank(tensor), rank(tensor))`." type_attr: "Tidx" } output_arg { name: "output" description: "The same shape as `tensor`." type_attr: "T" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_BOOL type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING } } } summary: "Reverses specific dimensions of a tensor." description: "NOTE `tf.reverse` has now changed behavior in preparation for 1.0.\n`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.\n\nGiven a `tensor`, and a `int32` tensor `axis` representing the set of\ndimensions of `tensor` to reverse. This operation reverses each dimension\n`i` for which there exists `j` s.t. `axis[j] == i`.\n\n`tensor` can have up to 8 dimensions. The number of dimensions specified\nin `axis` may be 0 or more entries. If an index is specified more than\nonce, a InvalidArgument error is raised.\n\nFor example:\n\n```\n# tensor \'t\' is [[[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]]]\n# tensor \'t\' shape is [1, 2, 3, 4]\n\n# \'dims\' is [3] or \'dims\' is [-1]\nreverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n# \'dims\' is \'[1]\' (or \'dims\' is \'[-3]\')\nreverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n# \'dims\' is \'[2]\' (or \'dims\' is \'[-2]\')\nreverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]]\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n```" } op { name: "RightShift" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } summary: "Elementwise computes the bitwise right-shift of `x` and `y`." description: "Performs a logical shift for unsigned integer types, and an arithmetic shift\nfor signed integer types.\n\nIf `y` is negative, or greater than or equal to than the width of `x` in bits\nthe result is implementation defined." is_commutative: true } op { name: "Rint" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns element-wise integer closest to x." description: "If the result is midway between two representable values,\nthe even representable is chosen.\nFor example:\n\n```\nrint(-1.5) ==> -2.0\nrint(0.5000001) ==> 1.0\nrint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]\n```" } op { name: "Round" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Rounds the values of a tensor to the nearest integer, element-wise." 
description: "Rounds half to even. Also known as bankers rounding. If you want to round\naccording to the current system rounding mode use std::cint." } op { name: "Rsqrt" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes reciprocal of square root of x element-wise." description: "I.e., \\\\(y = 1 / \\sqrt{x}\\\\)." } op { name: "RsqrtGrad" input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the gradient for the rsqrt of `x` wrt its input." description: "Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`\nis the corresponding input gradient." } op { name: "SampleDistortedBoundingBox" input_arg { name: "image_size" description: "1-D, containing `[height, width, channels]`." type_attr: "T" } input_arg { name: "bounding_boxes" description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image." type: DT_FLOAT } output_arg { name: "begin" description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`." type_attr: "T" } output_arg { name: "size" description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`." type_attr: "T" } output_arg { name: "bboxes" description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 } } } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. Otherwise, it is seeded by a random\nseed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "min_object_covered" type: "float" default_value { f: 0.1 } description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied." } attr { name: "aspect_ratio_range" type: "list(float)" default_value { list { f: 0.75 f: 1.33 } } description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range." } attr { name: "area_range" type: "list(float)" default_value { list { f: 0.05 f: 1 } } description: "The cropped area of the image must contain a fraction of the\nsupplied image within in this range." } attr { name: "max_attempts" type: "int" default_value { i: 100 } description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage." } attr { name: "use_image_if_no_bounding_boxes" type: "bool" default_value { b: false } description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error." } summary: "Generate a single randomly distorted bounding box for an image." 
description: "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving\nits content, i.e. *data augmentation*. This Op outputs a randomly distorted\nlocalization of an object, i.e. bounding box, given an `image_size`,\n`bounding_boxes` and a series of constraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example,\n\n```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.image_summary(\'images_with_box\', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n```\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised." is_stateful: true } op { name: "SampleDistortedBoundingBoxV2" input_arg { name: "image_size" description: "1-D, containing `[height, width, channels]`." type_attr: "T" } input_arg { name: "bounding_boxes" description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image." type: DT_FLOAT } input_arg { name: "min_object_covered" description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied." type: DT_FLOAT } output_arg { name: "begin" description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`." type_attr: "T" } output_arg { name: "size" description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`." type_attr: "T" } output_arg { name: "bboxes" description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 } } } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. Otherwise, it is seeded by a random\nseed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." 
} attr { name: "aspect_ratio_range" type: "list(float)" default_value { list { f: 0.75 f: 1.33 } } description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range." } attr { name: "area_range" type: "list(float)" default_value { list { f: 0.05 f: 1 } } description: "The cropped area of the image must contain a fraction of the\nsupplied image within in this range." } attr { name: "max_attempts" type: "int" default_value { i: 100 } description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage." } attr { name: "use_image_if_no_bounding_boxes" type: "bool" default_value { b: false } description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error." } summary: "Generate a single randomly distorted bounding box for an image." description: "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving\nits content, i.e. *data augmentation*. This Op outputs a randomly distorted\nlocalization of an object, i.e. bounding box, given an `image_size`,\n`bounding_boxes` and a series of constraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example,\n\n```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.image_summary(\'images_with_box\', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n```\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised." is_stateful: true } op { name: "Save" input_arg { name: "filename" description: "Must have a single element. The name of the file to which we write\nthe tensor." type: DT_STRING } input_arg { name: "tensor_names" description: "Shape `[N]`. The names of the tensors to be saved." type: DT_STRING } input_arg { name: "data" description: "`N` tensors to save." type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } summary: "Saves the input tensors to disk." description: "The size of `tensor_names` must match the number of tensors in `data`. `data[i]`\nis written to `filename` with name `tensor_names[i]`.\n\nSee also `SaveSlices`." is_stateful: true } op { name: "SaveSlices" input_arg { name: "filename" description: "Must have a single element. 
The name of the file to which we write the\ntensor." type: DT_STRING } input_arg { name: "tensor_names" description: "Shape `[N]`. The names of the tensors to be saved." type: DT_STRING } input_arg { name: "shapes_and_slices" description: "Shape `[N]`. The shapes and slice specifications to use when\nsaving the tensors." type: DT_STRING } input_arg { name: "data" description: "`N` tensors to save." type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } summary: "Saves input tensors slices to disk." description: "This is like `Save` except that tensors can be listed in the saved file as being\na slice of a larger tensor. `shapes_and_slices` specifies the shape of the\nlarger tensor and the slice that this tensor covers. `shapes_and_slices` must\nhave as many elements as `tensor_names`.\n\nElements of the `shapes_and_slices` input must either be:\n\n* The empty string, in which case the corresponding tensor is\n saved normally.\n* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the\n `dimI` are the dimensions of the larger tensor and `slice-spec`\n specifies what part is covered by the tensor to save.\n\n`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`\nwhere each `sliceI` is either:\n\n* The string `-` meaning that the slice covers all indices of this dimension\n* `start,length` where `start` and `length` are integers. In that\n case the slice covers `length` indices starting at `start`.\n\nSee also `Save`." is_stateful: true } op { name: "SaveV2" input_arg { name: "prefix" description: "Must have a single element. The prefix of the V2 checkpoint to which we\nwrite the tensors." type: DT_STRING } input_arg { name: "tensor_names" description: "shape {N}. The names of the tensors to be saved." type: DT_STRING } input_arg { name: "shape_and_slices" description: "shape {N}. The slice specs of the tensors to be saved.\nEmpty strings indicate that they are non-partitioned tensors." type: DT_STRING } input_arg { name: "tensors" description: "`N` tensors to save." type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } summary: "Saves tensors in V2 checkpoint format." description: "By default, saves the named tensors in full. If the caller wishes to save\nspecific slices of full tensors, \"shape_and_slices\" should be non-empty strings\nand correspondingly well-formed." is_stateful: true } op { name: "ScalarSummary" input_arg { name: "tags" description: "Tags for the summary." type: DT_STRING } input_arg { name: "values" description: "Same shape as `tags. Values for the summary." type_attr: "T" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Outputs a `Summary` protocol buffer with scalar values." description: "The input `tags` and `values` must have the same shape. The generated summary\nhas a summary value for each tag-value pair in `tags` and `values`." 
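A minimal sketch of driving `ScalarSummary` from Python, assuming the TF 1.x `tf.summary` wrappers; the log directory is a placeholder:

```python
import tensorflow as tf

loss = tf.placeholder(tf.float32, shape=[], name="loss")
summ = tf.summary.scalar("loss", loss)  # one tag/value pair

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/logdir", sess.graph)  # placeholder path
    s = sess.run(summ, feed_dict={loss: 0.25})
    writer.add_summary(s, global_step=0)
    writer.close()
```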
} op { name: "ScanDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "initial_state" type_list_attr: "Tstate" } input_arg { name: "other_arguments" type_list_attr: "Targuments" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "f" type: "func" } attr { name: "Tstate" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "Targuments" type: "list(type)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset successively reduces `f` over the elements of `input_dataset`." } op { name: "ScatterAdd" input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to add to `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Adds sparse updates to a variable reference." description: "This operation computes\n\n # Scalar indices\n ref[indices, ...] += updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] += updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`.\n\n
" } op { name: "ScatterDiv" input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of values that `ref` is divided by." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Divides a variable reference by sparse updates." description: "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] /= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] /= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions divide.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`." } op { name: "ScatterMul" input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to multiply to `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Multiplies sparse updates into a variable reference." description: "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] *= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] *= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] 
*= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`." } op { name: "ScatterNd" input_arg { name: "indices" description: "Index tensor." type_attr: "Tindices" } input_arg { name: "updates" description: "Updates to scatter into output." type_attr: "T" } input_arg { name: "shape" description: "1-D. The shape of the resulting tensor." type_attr: "Tindices" } output_arg { name: "output" description: "A new tensor with the given shape and updates applied according\nto the indices." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Scatter `updates` into a new (initially zero) tensor according to `indices`." description: "Creates a new tensor by applying sparse `updates` to individual\nvalues or slices within a zero tensor of the given `shape` according to\nindices. This operator is the inverse of the @{tf.gather_nd} operator which\nextracts values or slices from a given tensor.\n\n**WARNING**: The order in which updates are applied is nondeterministic, so the\noutput will be nondeterministic if `indices` contains duplicates.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = shape.rank`) or slices\n(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n`shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\nThe simplest form of scatter is to insert individual elements in a tensor by\nindex. For example, say we want to insert 4 scattered elements in a rank-1\ntensor with 8 elements.\n\n
In Python, this scatter operation would look like this:\n\n```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n shape = tf.constant([8])\n scatter = tf.scatter_nd(indices, updates, shape)\n with tf.Session() as sess:\n print(sess.run(scatter))\n```\n\nThe resulting tensor would look like this:\n\n [0, 11, 0, 10, 9, 0, 0, 12]\n\nWe can also insert entire slices of a higher rank tensor all at once. For\nexample, suppose we want to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\n
\n\nIn Python, this scatter operation would look like this:\n\n```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n shape = tf.constant([4, 4, 4])\n scatter = tf.scatter_nd(indices, updates, shape)\n with tf.Session() as sess:\n print(sess.run(scatter))\n```\n\nThe resulting tensor would look like this:\n\n [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]" } op { name: "ScatterNdAdd" input_arg { name: "ref" description: "A mutable Tensor. Should be from a Variable node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref." type_attr: "T" } output_arg { name: "output_ref" description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention." } summary: "Applies sparse addition between `updates` and individual values or slices" description: "within a given variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to 8\nelements. In Python, that addition would look like this:\n\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n add = tf.scatter_nd_add(ref, indices, updates)\n with tf.Session() as sess:\n print sess.run(add)\n\nThe resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee @{tf.scatter_nd} for more details about how to make updates to\nslices." } op { name: "ScatterNdNonAliasingAdd" input_arg { name: "input" description: "A Tensor." type_attr: "T" } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: `int32`, `int64`.\nA tensor of indices into `input`." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. 
A tensor of updated values\nto add to `input`." type_attr: "T" } output_arg { name: "output" description: "A `Tensor` with the same shape as `input`, containing values of `input`\nupdated with `updates`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Applies sparse addition to `input` using individual values or slices" description: "from `updates` according to indices `indices`. The updates are non-aliasing:\n`input` is only modified in-place if no other operations will use it.\nOtherwise, a copy of `input` is made. This operation has a gradient with\nrespect to both `input` and `updates`.\n\n`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `input`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or `(P-K)`-dimensional slices\n(if `K < P`) along the `K`th dimension of `input`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to 8\nelements. In Python, that addition would look like this:\n\n input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n output = tf.scatter_nd_non_aliasing_add(input, indices, updates)\n with tf.Session() as sess:\n print(sess.run(output))\n\nThe resulting value `output` would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee @{tf.scatter_nd} for more details about how to make updates to slices." } op { name: "ScatterNdSub" input_arg { name: "ref" description: "A mutable Tensor. Should be from a Variable node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref." type_attr: "T" } output_arg { name: "output_ref" description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention." 
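The `ScatterNdAdd` snippet above uses a Python 2 `print` and omits variable initialization; a runnable TF 1.x variant, assuming the `tf.scatter_nd_add` wrapper:

```python
import tensorflow as tf

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # the doc snippet skips this
    print(sess.run(add))  # [ 1 13  3 14 14  6  7 20]
```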
} summary: "Applies sparse subtraction between `updates` and individual values or slices" description: "within a given variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to subtract 4 scattered elements from a rank-1 tensor\nwith 8 elements. In Python, that subtraction would look like this:\n\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n sub = tf.scatter_nd_sub(ref, indices, updates)\n with tf.Session() as sess:\n print sess.run(sub)\n\nThe resulting update to ref would look like this:\n\n [1, -9, 3, -6, -4, 6, 7, -4]\n\nSee @{tf.scatter_nd} for more details about how to make updates to\nslices." } op { name: "ScatterNdUpdate" input_arg { name: "ref" description: "A mutable Tensor. Should be from a Variable node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref." type_attr: "T" } output_arg { name: "output_ref" description: "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention." } summary: "Applies sparse `updates` to individual values or slices within a given" description: "variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to update 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that update would look like this:\n\n```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n update = tf.scatter_nd_update(ref, indices, updates)\n with tf.Session() as sess:\n print sess.run(update)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee @{tf.scatter_nd} for more details about how to make updates to\nslices." } op { name: "ScatterSub" input_arg { name: "ref" description: "Should be from a `Variable` node." 
type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to subtract from `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Subtracts sparse updates to a variable reference." description: "```python\n # Scalar indices\n ref[indices, ...] -= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] -= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their (negated) contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`.\n\n
" } op { name: "ScatterUpdate" input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to store in `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Applies sparse updates to a variable reference." description: "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] = updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] = updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nIf values in `ref` is to be updated more than once, because there are\nduplicate entries in `indices`, the order at which the updates happen\nfor each value is undefined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]`.\n\n
" } op { name: "SdcaFprint" input_arg { name: "input" description: "vector of strings to compute fingerprints on." type: DT_STRING } output_arg { name: "output" description: "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint." type: DT_INT64 } summary: "Computes fingerprints of the input strings." } op { name: "SdcaOptimizer" input_arg { name: "sparse_example_indices" description: "a list of vectors which contain example indices." type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_indices" description: "a list of vectors which contain feature indices." type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_values" description: "a list of vectors which contains feature value\nassociated with each feature group." type: DT_FLOAT number_attr: "num_sparse_features_with_values" } input_arg { name: "dense_features" description: "a list of matrices which contains the dense feature values." type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_weights" description: "a vector which contains the weight associated with each\nexample." type: DT_FLOAT } input_arg { name: "example_labels" description: "a vector which contains the label/target associated with each\nexample." type: DT_FLOAT } input_arg { name: "sparse_indices" description: "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. This field maybe omitted for the\ndense approach." type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_weights" description: "a list of vectors where each value is the weight associated with\na sparse feature group." type: DT_FLOAT number_attr: "num_sparse_features" } input_arg { name: "dense_weights" description: "a list of vectors where the values are the weights associated\nwith a dense feature group." type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_state_data" description: "a list of vectors containing the example state data." type: DT_FLOAT } output_arg { name: "out_example_state_data" description: "a list of vectors containing the updated example state\ndata." type: DT_FLOAT } output_arg { name: "out_delta_sparse_weights" description: "a list of vectors where each value is the delta\nweights associated with a sparse feature group." type: DT_FLOAT number_attr: "num_sparse_features" } output_arg { name: "out_delta_dense_weights" description: "a list of vectors where the values are the delta\nweights associated with a dense feature group." type: DT_FLOAT number_attr: "num_dense_features" } attr { name: "loss_type" type: "string" description: "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses." allowed_values { list { s: "logistic_loss" s: "squared_loss" s: "hinge_loss" s: "smooth_hinge_loss" } } } attr { name: "adaptative" type: "bool" default_value { b: false } description: "Whether to use Adapative SDCA for the inner loop." } attr { name: "num_sparse_features" type: "int" description: "Number of sparse feature groups to train on." has_minimum: true } attr { name: "num_sparse_features_with_values" type: "int" description: "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0." has_minimum: true } attr { name: "num_dense_features" type: "int" description: "Number of dense feature groups to train on." 
has_minimum: true } attr { name: "l1" type: "float" description: "Symmetric l1 regularization strength." } attr { name: "l2" type: "float" description: "Symmetric l2 regularization strength." } attr { name: "num_loss_partitions" type: "int" description: "Number of partitions of the global loss function." has_minimum: true minimum: 1 } attr { name: "num_inner_iterations" type: "int" description: "Number of iterations per mini-batch." has_minimum: true minimum: 1 } summary: "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for" description: "linear models with L1 + L2 regularization. As global optimization objective is\nstrongly-convex, the optimizer optimizes the dual objective at each step. The\noptimizer applies each update one example at a time. Examples are sampled\nuniformly, and the optimizer is learning rate free and enjoys linear convergence\nrate.\n\n[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
\nShai Shalev-Shwartz, Tong Zhang. 2012\n\n$$Loss Objective = \\sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$\n\n[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
\nChenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,\nPeter Richtarik, Martin Takac. 2015\n\n[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
\nDominik Csiba, Zheng Qu, Peter Richtarik. 2015" } op { name: "SdcaShrinkL1" input_arg { name: "weights" description: "a list of vectors where each value is the weight associated with a\nfeature group." type: DT_FLOAT number_attr: "num_features" is_ref: true } attr { name: "num_features" type: "int" description: "Number of feature groups to apply shrinking step." has_minimum: true } attr { name: "l1" type: "float" description: "Symmetric l1 regularization strength." } attr { name: "l2" type: "float" description: "Symmetric l2 regularization strength. Should be a positive float." } summary: "Applies L1 regularization shrink step on the parameters." } op { name: "SegmentMax" input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the maximum along segments of a tensor." description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nComputes a tensor such that\n\\\\(output_i = \\max_j(data_j)\\\\) where `max` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the max is empty for a given segment ID `i`, `output[i] = 0`.\n\n
" } op { name: "SegmentMean" input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the mean along segments of a tensor." description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nComputes a tensor such that\n\\\\(output_i = \\frac{\\sum_j data_j}{N}\\\\) where `mean` is\nover `j` such that `segment_ids[j] == i` and `N` is the total number of\nvalues summed.\n\nIf the mean is empty for a given segment ID `i`, `output[i] = 0`.\n\n
" } op { name: "SegmentMin" input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the minimum along segments of a tensor." description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nComputes a tensor such that\n\\\\(output_i = \\min_j(data_j)\\\\) where `min` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the min is empty for a given segment ID `i`, `output[i] = 0`.\n\n
" } op { name: "SegmentProd" input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the product along segments of a tensor." description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nComputes a tensor such that\n\\\\(output_i = \\prod_j data_j\\\\) where the product is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the product is empty for a given segment ID `i`, `output[i] = 1`.\n\n
" } op { name: "SegmentSum" input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the sum along segments of a tensor." description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nComputes a tensor such that\n\\\\(output_i = \\sum_j data_j\\\\) where sum is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n
" } op { name: "Select" input_arg { name: "condition" type: DT_BOOL } input_arg { name: "t" description: "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`." type_attr: "T" } input_arg { name: "e" description: "= A `Tensor` with the same type and shape as `t`." type_attr: "T" } output_arg { name: "output" description: "= A `Tensor` with the same type and shape as `t` and `e`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Selects elements from `t` or `e`, depending on `condition`." description: "The `t`, and `e` tensors must all have the same shape, and the\noutput will also have that shape.\n\nThe `condition` tensor must be a scalar if `t` and `e` are scalars.\nIf `t` and `e` are vectors or higher rank, then `condition` must be either a\nscalar, a vector with size matching the first dimension of `t`, or must have\nthe same shape as `t`.\n\nThe `condition` tensor acts as a mask that chooses, based on the value at each\nelement, whether the corresponding element / row in the output should be\ntaken from `t` (if true) or `e` (if false).\n\nIf `condition` is a vector and `t` and `e` are higher rank matrices, then\nit chooses which row (outer dimension) to copy from `t` and `e`.\nIf `condition` has the same shape as `t` and `e`, then it chooses which\nelement to copy from `t` and `e`.\n\nFor example:\n\n```python\n# \'condition\' tensor is [[True, False]\n# [False, True]]\n# \'t\' is [[1, 2],\n# [3, 4]]\n# \'e\' is [[5, 6],\n# [7, 8]]\nselect(condition, t, e) # => [[1, 6], [7, 4]]\n\n\n# \'condition\' tensor is [True, False]\n# \'t\' is [[1, 2],\n# [3, 4]]\n# \'e\' is [[5, 6],\n# [7, 8]]\nselect(condition, t, e) ==> [[1, 2],\n [7, 8]]\n\n```" } op { name: "SelfAdjointEig" input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M+1, M]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } summary: "Computes the Eigen Decomposition of a batch of square self-adjoint matrices." description: "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices, with the same constraints as the single matrix\nSelfAdjointEig.\n\nThe result is a [..., M+1, M] matrix with [..., 0,:] containing the\neigenvalues, and subsequent [...,1:, :] containing the eigenvectors." deprecation { version: 11 explanation: "Use SelfAdjointEigV2 instead." } } op { name: "SelfAdjointEigV2" input_arg { name: "input" description: "`Tensor` input of shape `[N, N]`." type_attr: "T" } output_arg { name: "e" description: "Eigenvalues. Shape is `[N]`." type_attr: "T" } output_arg { name: "v" description: "Eigenvectors. Shape is `[N, N]`." type_attr: "T" } attr { name: "compute_v" type: "bool" default_value { b: true } description: "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the eigen decomposition of one or more square self-adjoint matrices." 
description: "Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in\n`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.\n\n```python\n# a is a tensor.\n# e is a tensor of eigenvalues.\n# v is a tensor of eigenvectors.\ne, v = self_adjoint_eig(a)\ne = self_adjoint_eig(a, compute_v=False)\n```" } op { name: "Selu" input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`" description: "if < 0, `scale * features` otherwise.\n\nSee [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)" } op { name: "SeluGrad" input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Selu operation." type_attr: "T" } input_arg { name: "outputs" description: "The outputs of the corresponding Selu operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients * (outputs + scale * alpha)`\nif outputs < 0, `scale * gradients` otherwise." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes gradients for the scaled exponential linear (Selu) operation." } op { name: "SerializeIterator" input_arg { name: "resource_handle" description: "A handle to an iterator resource." type: DT_RESOURCE } output_arg { name: "serialized" description: "A variant tensor storing the state of the iterator contained in the\nresource." type: DT_VARIANT } summary: "Converts the given `resource_handle` representing an iterator to a variant tensor." is_stateful: true } op { name: "SerializeManySparse" input_arg { name: "sparse_indices" description: "2-D. The `indices` of the minibatch `SparseTensor`." type: DT_INT64 } input_arg { name: "sparse_values" description: "1-D. The `values` of the minibatch `SparseTensor`." type_attr: "T" } input_arg { name: "sparse_shape" description: "1-D. The `shape` of the minibatch `SparseTensor`." type: DT_INT64 } output_arg { name: "serialized_sparse" type: DT_STRING } attr { name: "T" type: "type" } summary: "Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`." description: "The `SparseTensor` must have rank `R` greater than 1, and the first dimension\nis treated as the minibatch dimension. Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension. The serialized\n`SparseTensor` objects going into each row of `serialized_sparse` will have\nrank `R-1`.\n\nThe minibatch size `N` is extracted from `sparse_shape[0]`." } op { name: "SerializeSparse" input_arg { name: "sparse_indices" description: "2-D. The `indices` of the `SparseTensor`." type: DT_INT64 } input_arg { name: "sparse_values" description: "1-D. The `values` of the `SparseTensor`." type_attr: "T" } input_arg { name: "sparse_shape" description: "1-D. The `shape` of the `SparseTensor`." type: DT_INT64 } output_arg { name: "serialized_sparse" type: DT_STRING } attr { name: "T" type: "type" } summary: "Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object." } op { name: "SerializeTensor" input_arg { name: "tensor" description: "A Tensor of type `T`." type_attr: "T" } output_arg { name: "serialized" description: "A serialized TensorProto proto of the input tensor." 
type: DT_STRING } attr { name: "T" type: "type" description: "The type of the input tensor." } summary: "Transforms a Tensor into a serialized TensorProto proto." } op { name: "SetSize" input_arg { name: "set_indices" description: "2D `Tensor`, indices of a `SparseTensor`." type: DT_INT64 } input_arg { name: "set_values" description: "1D `Tensor`, values of a `SparseTensor`." type_attr: "T" } input_arg { name: "set_shape" description: "1D `Tensor`, shape of a `SparseTensor`." type: DT_INT64 } output_arg { name: "size" description: "For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st\n`n-1` dimensions as `set`. Each value is the number of unique elements in\nthe corresponding `[0...n-1]` dimension of `set`." type: DT_INT32 } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } summary: "Number of unique elements along last dimension of input `set`." description: "Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,\nand `set_shape`. The last dimension contains values in a set, duplicates are\nallowed but ignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set`\nindices." } op { name: "Shape" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Returns the shape of a tensor." description: "This operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```\n# \'t\' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\nshape(t) ==> [2, 2, 3]\n```" } op { name: "ShapeN" input_arg { name: "input" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "out_type" number_attr: "N" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Returns shape of tensors." description: "This operation returns N 1-D integer tensors representing shape of `input[i]s`." } op { name: "ShardedFilename" input_arg { name: "basename" type: DT_STRING } input_arg { name: "shard" type: DT_INT32 } input_arg { name: "num_shards" type: DT_INT32 } output_arg { name: "filename" type: DT_STRING } summary: "Generate a sharded filename. The filename is printf formatted as" description: " %s-%05d-of-%05d, basename, shard, num_shards." } op { name: "ShardedFilespec" input_arg { name: "basename" type: DT_STRING } input_arg { name: "num_shards" type: DT_INT32 } output_arg { name: "filename" type: DT_STRING } summary: "Generate a glob pattern matching all sharded file names." } op { name: "ShuffleDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "buffer_size" description: "The number of output elements to buffer in an iterator over\nthis dataset. Compare with the `min_after_dequeue` attr when creating a\n`RandomShuffleQueue`." type: DT_INT64 } input_arg { name: "seed" description: "A scalar seed for the random number generator. If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used." 
type: DT_INT64 } input_arg { name: "seed2" description: "A second scalar seed to avoid seed collision." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "reshuffle_each_iteration" type: "bool" default_value { b: true } description: "If true, each iterator over this dataset will be given\na different pseudorandomly generated seed, based on a sequence seeded by the\n`seed` and `seed2` inputs. If false, each iterator will be given the same\nseed, and repeated iteration over this dataset will yield the exact same\nsequence of results." } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that shuffles elements from `input_dataset` pseudorandomly." } op { name: "Sigmoid" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes sigmoid of `x` element-wise." description: "Specifically, `y = 1 / (1 + exp(-x))`." } op { name: "SigmoidGrad" input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the gradient of the sigmoid of `x` wrt its input." description: "Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and\n`dy` is the corresponding input gradient." } op { name: "Sign" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns an element-wise indication of the sign of a number." description: "`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.\n\nFor complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`." } op { name: "Sin" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes sin of x element-wise." } op { name: "Sinh" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes hyperbolic sine of x element-wise." } op { name: "Size" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Returns the size of a tensor." description: "This operation returns an integer representing the number of elements in\n`input`.\n\nFor example:\n\n```\n# \'t\' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]\nsize(t) ==> 12\n```" } op { name: "SkipDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "count" description: "A scalar representing the number of elements from the `input_dataset`\nthat should be skipped. If count is -1, skips everything." 
type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that skips `count` elements from the `input_dataset`." } op { name: "Skipgram" output_arg { name: "vocab_word" description: "A vector of words in the corpus." type: DT_STRING } output_arg { name: "vocab_freq" description: "Frequencies of words. Sorted in the non-ascending order." type: DT_INT32 } output_arg { name: "words_per_epoch" description: "Number of words per epoch in the data file." type: DT_INT64 } output_arg { name: "current_epoch" description: "The current epoch number." type: DT_INT32 } output_arg { name: "total_words_processed" description: "The total number of words processed so far." type: DT_INT64 } output_arg { name: "examples" description: "A vector of word ids." type: DT_INT32 } output_arg { name: "labels" description: "A vector of word ids." type: DT_INT32 } attr { name: "filename" type: "string" description: "The corpus\'s text file name." } attr { name: "batch_size" type: "int" description: "The size of produced batch." } attr { name: "window_size" type: "int" default_value { i: 5 } description: "The number of words to predict to the left and right of the target." } attr { name: "min_count" type: "int" default_value { i: 5 } description: "The minimum number of word occurrences for it to be included in the\nvocabulary." } attr { name: "subsample" type: "float" default_value { f: 0.001 } description: "Threshold for word occurrence. Words that appear with higher\nfrequency will be randomly down-sampled. Set to 0 to disable." } summary: "Parses a text file and creates a batch of examples." deprecation { version: 19 explanation: "Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result" } is_stateful: true } op { name: "Slice" input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" description: "begin[i] specifies the offset into the \'i\'th dimension of\n\'input\' to slice from." type_attr: "Index" } input_arg { name: "size" description: "size[i] specifies the number of elements of the \'i\'th dimension\nof \'input\' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i])." type_attr: "Index" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Return a slice from \'input\'." description: "The output tensor is a tensor with dimensions described by \'size\'\nwhose values are extracted from \'input\' starting at the offsets in\n\'begin\'.\n\n*Requirements*:\n 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)" } op { name: "Softmax" input_arg { name: "logits" description: "2-D with shape `[batch_size, num_classes]`." type_attr: "T" } output_arg { name: "softmax" description: "Same shape as `logits`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes softmax activations." 
description: "For each batch `i` and class `j` we have\n\n softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))" } op { name: "SoftmaxCrossEntropyWithLogits" input_arg { name: "features" description: "batch_size x num_classes matrix" type_attr: "T" } input_arg { name: "labels" description: "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution." type_attr: "T" } output_arg { name: "loss" description: "Per example loss (batch_size vector)." type_attr: "T" } output_arg { name: "backprop" description: "backpropagated gradients (batch_size x num_classes matrix)." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } summary: "Computes softmax cross entropy cost and gradients to backpropagate." description: "Inputs are the logits, not probabilities." } op { name: "Softplus" input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes softplus: `log(exp(features) + 1)`." } op { name: "SoftplusGrad" input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding softplus operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding softplus operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients / (1 + exp(-features))`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes softplus gradients for a softplus operation." } op { name: "Softsign" input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes softsign: `features / (abs(features) + 1)`." } op { name: "SoftsignGrad" input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding softsign operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding softsign operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients / (1 + abs(features)) ** 2`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes softsign gradients for a softsign operation." } op { name: "SpaceToBatch" input_arg { name: "input" description: "4-D with shape `[batch, height, width, depth]`." type_attr: "T" } input_arg { name: "paddings" description: "2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies\n the padding of the input with zeros across the spatial dimensions as follows:\n\n paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n The effective spatial dimensions of the zero-padded input tensor will be:\n\n height_pad = pad_top + height + pad_bottom\n width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. It indicates the block size.\n\n * Non-overlapping blocks of size `block_size x block_size` in the height and\n width dimensions are rearranged into the batch dimension at each location.\n * The batch of the output tensor is `batch * block_size * block_size`.\n * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution." type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } summary: "SpaceToBatch for 4-D tensors of type T." description: "This is a legacy version of the more general SpaceToBatchND.\n\nZero-pads and then rearranges (permutes) blocks of spatial data into batch.\nMore specifically, this op outputs a copy of the input tensor where values from\nthe `height` and `width` dimensions are moved to the `batch` dimension. After\nthe zero-padding, both `height` and `width` of the input must be divisible by the\nblock size." } op { name: "SpaceToBatchND" input_arg { name: "input" description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions." type_attr: "T" } input_arg { name: "block_shape" description: "1-D with shape `[M]`, all values must be >= 1." type_attr: "Tblock_shape" } input_arg { name: "paddings" description: "2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`."
It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution." type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tblock_shape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "SpaceToBatch for N-D tensors of type T." description: "This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\ngrid of blocks of shape `block_shape`, and interleaves these blocks with the\n\"batch\" dimension (0) such that in the output, the spatial dimensions\n`[1, ..., M]` correspond to the position within the grid, and the batch\ndimension combines both the position within a spatial block and the original\nbatch position. 
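The four numbered steps above can be transcribed almost verbatim into NumPy. The sketch below is illustrative (`space_to_batch_nd` is a hypothetical helper, not the TF kernel) and reproduces example (1).

```
import numpy as np

# A direct NumPy transcription of the four documented SpaceToBatchND
# steps, for input shape [batch] + spatial_shape + remaining_shape.
def space_to_batch_nd(x, block_shape, paddings):
    M = len(block_shape)
    pads = [(0, 0)] + [tuple(p) for p in paddings] + [(0, 0)] * (x.ndim - 1 - M)
    padded = np.pad(x, pads)                                  # step 1: zero-pad
    shape = [padded.shape[0]]
    for i in range(M):                                        # step 2: reshape
        shape += [padded.shape[1 + i] // block_shape[i], block_shape[i]]
    shape += list(padded.shape[1 + M:])
    reshaped = padded.reshape(shape)
    perm = ([2 * i + 2 for i in range(M)] + [0] +
            [2 * i + 1 for i in range(M)] + list(range(2 * M + 1, reshaped.ndim)))
    permuted = reshaped.transpose(perm)                       # step 3: permute
    out_shape = ([x.shape[0] * int(np.prod(block_shape))] +
                 [padded.shape[1 + i] // block_shape[i] for i in range(M)] +
                 list(padded.shape[1 + M:]))
    return permuted.reshape(out_shape)                        # step 4: flatten blocks

x = np.array([[[[1], [2]], [[3], [4]]]])                      # example (1)
out = space_to_batch_nd(x, [2, 2], [[0, 0], [0, 0]])
assert out.shape == (4, 1, 1, 1) and out.ravel().tolist() == [1, 2, 3, 4]
```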
Prior to division into blocks, the spatial dimensions of the\ninput are optionally zero padded according to `paddings`. See below for a\nprecise description." } op { name: "SpaceToDepth" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" description: "The size of the spatial block." has_minimum: true minimum: 2 } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } summary: "SpaceToDepth for tensors of type T." description: "Rearranges blocks of spatial data into depth. More specifically,\nthis op outputs a copy of the input tensor where values from the `height`\nand `width` dimensions are moved to the `depth` dimension.\nThe attr `block_size` indicates the input block size.\n\n * Non-overlapping blocks of size `block_size x block_size` are rearranged\n into depth at each location.\n * The depth of the output tensor is `block_size * block_size * input_depth`.\n * The Y, X coordinates within each block of the input become the high order\n component of the output channel index.\n * The input tensor\'s height and width must be divisible by block_size.\n\nThe `data_format` attr specifies the layout of the input and output tensors\nwith the following options:\n \"NHWC\": `[ batch, height, width, channels ]`\n \"NCHW\": `[ batch, channels, height, width ]`\n \"NCHW_VECT_C\":\n `qint8 [ batch, channels / 4, height, width, channels % 4 ]`\n\nIt is useful to consider the operation as transforming a 6-D Tensor.\ne.g. for data_format = NHWC,\n Each element in the input tensor can be specified via 6 coordinates,\n ordered by decreasing memory layout significance as:\n n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates\n within the output image, bX, bY means coordinates\n within the input block, iC means input channels).\n The output would be a transpose to the following layout:\n n,oY,oX,bY,bX,iC\n\nThis operation is useful for resizing the activations between convolutions\n(but keeping all data), e.g. instead of pooling. It is also useful for training\npurely convolutional models.\n\nFor example, given an input of shape `[1, 2, 2, 1]`, data_format = \"NHWC\" and\nblock_size = 2:\n\n```\nx = [[[[1], [2]],\n [[3], [4]]]]\n```\n\nThis operation will output a tensor of shape `[1, 1, 1, 4]`:\n\n```\n[[[[1, 2, 3, 4]]]]\n```\n\nHere, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,\nthe corresponding output will have a single element (i.e. width and height are\nboth 1) and will have a depth of 4 channels (1 * block_size * block_size).\nThe output element shape is `[1, 1, 4]`.\n\nFor an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThis operation, for block_size of 2, will return the following tensor of shape\n`[1, 1, 1, 12]`\n\n```\n[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n```\n\nSimilarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:\n\n```\nx = [[[[1], [2], [5], [6]],\n [[3], [4], [7], [8]],\n [[9], [10], [13], [14]],\n [[11], [12], [15], [16]]]]\n```\n\nthe operator will return the following tensor of shape `[1, 2, 2, 4]`:\n\n```\nx = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n```" } op { name: "SparseAccumulatorApplyGradient" input_arg { name: "handle" description: "The handle to an accumulator."
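The 6-D `n,oY,bY,oX,bX,iC -> n,oY,oX,bY,bX,iC` view described above for `data_format = "NHWC"` is exactly a reshape, transpose, reshape; an illustrative NumPy sketch, not the TF kernel:

```
import numpy as np

# SpaceToDepth for NHWC via the documented 6-D transpose view.
def space_to_depth_nhwc(x, block_size):
    n, h, w, c = x.shape
    x = x.reshape(n, h // block_size, block_size, w // block_size, block_size, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)      # n,oY,bY,oX,bX,iC -> n,oY,oX,bY,bX,iC
    return x.reshape(n, h // block_size, w // block_size,
                     block_size * block_size * c)

x = np.array([[[[1], [2]], [[3], [4]]]])   # shape [1, 2, 2, 1]
assert space_to_depth_nhwc(x, 2).tolist() == [[[[1, 2, 3, 4]]]]
```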
type: DT_STRING is_ref: true } input_arg { name: "local_step" description: "The local_step value at which the sparse gradient was computed." type: DT_INT64 } input_arg { name: "gradient_indices" description: "Indices of the sparse gradient to be accumulated. Must be a\nvector." type: DT_INT64 } input_arg { name: "gradient_values" description: "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent." type_attr: "dtype" } input_arg { name: "gradient_shape" description: "Shape of the sparse gradient to be accumulated." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "has_known_shape" type: "bool" description: "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation." } summary: "Applies a sparse gradient to a given accumulator." description: "Does not add if local_step is smaller than the accumulator\'s\nglobal_step." } op { name: "SparseAccumulatorTakeGradient" input_arg { name: "handle" description: "The handle to a SparseConditionalAccumulator." type: DT_STRING is_ref: true } input_arg { name: "num_required" description: "Number of gradients required before we return an aggregate." type: DT_INT32 } output_arg { name: "indices" description: "Indices of the average of the accumulated sparse gradients." type: DT_INT64 } output_arg { name: "values" description: "Values of the average of the accumulated sparse gradients." type_attr: "dtype" } output_arg { name: "shape" description: "Shape of the average of the accumulated sparse gradients." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Extracts the average sparse gradient in a SparseConditionalAccumulator." description: "The op will block until sufficient (i.e., more than num_required)\ngradients have been accumulated. If the accumulator has already\naggregated more than num_required gradients, it will return its\naverage of the accumulated gradients. Also automatically increments\nthe recorded global_step in the accumulator by 1, and resets the\naggregate to 0." } op { name: "SparseAdd" input_arg { name: "a_indices" description: "2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector." type: DT_INT64 } input_arg { name: "b_indices" description: "2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix." type: DT_INT64 } input_arg { name: "b_values" description: "1-D.
The `values` of the second `SparseTensor`, size `[nnz]` Vector." type_attr: "T" } input_arg { name: "b_shape" description: "1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector." type: DT_INT64 } input_arg { name: "thresh" description: "0-D. The magnitude threshold that determines if an output value/index\npair takes space." type_attr: "Treal" } output_arg { name: "sum_indices" type: DT_INT64 } output_arg { name: "sum_values" type_attr: "T" } output_arg { name: "sum_shape" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Treal" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Adds two `SparseTensor` objects to produce another `SparseTensor`." description: "The input `SparseTensor` objects\' indices are assumed ordered in standard\nlexicographic order. If this is not the case, before this step run\n`SparseReorder` to restore index ordering.\n\nBy default, if two values sum to zero at some index, the output `SparseTensor`\nwould still include that particular location in its index, storing a zero in the\ncorresponding value slot. To override this, callers can specify `thresh`,\nindicating that if the sum has a magnitude strictly smaller than `thresh`, its\ncorresponding value and index would then not be included. In particular,\n`thresh == 0` (default) means everything is kept and actual thresholding happens\nonly for a positive value.\n\nIn the following shapes, `nnz` is the count after taking `thresh` into account." } op { name: "SparseAddGrad" input_arg { name: "backprop_val_grad" description: "1-D with shape `[nnz(sum)]`. The gradient with respect to\nthe non-empty values of the sum." type_attr: "T" } input_arg { name: "a_indices" description: "2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`." type: DT_INT64 } input_arg { name: "b_indices" description: "2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`." type: DT_INT64 } input_arg { name: "sum_indices" description: "2-D. The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`." type: DT_INT64 } output_arg { name: "a_val_grad" description: "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A." type_attr: "T" } output_arg { name: "b_val_grad" description: "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "The gradient operator for the SparseAdd op." description: "The SparseAdd op calculates A + B, where A, B, and the sum are all represented\nas `SparseTensor` objects. This op takes in the upstream gradient w.r.t.\nnon-empty values of the sum, and outputs the gradients w.r.t. the non-empty\nvalues of A and B." 
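The `thresh` rule documented above for `SparseAdd` (drop output entries whose summed magnitude is strictly smaller than `thresh`) can be sketched with a dictionary keyed by index; the helper below is illustrative and also restores the lexicographic index order the op assumes.

```
# Illustrative sketch of SparseAdd with thresholding: sum values at
# matching indices, then keep an entry only if |sum| >= thresh
# (thresh == 0 keeps everything, including explicit zeros).
def sparse_add(a_ix, a_val, b_ix, b_val, thresh=0.0):
    acc = {}
    for ix, v in list(zip(map(tuple, a_ix), a_val)) + \
                 list(zip(map(tuple, b_ix), b_val)):
        acc[ix] = acc.get(ix, 0.0) + v
    kept = sorted((ix, v) for ix, v in acc.items() if abs(v) >= thresh)
    return [ix for ix, _ in kept], [v for _, v in kept]

ix, val = sparse_add([(0, 0), (1, 1)], [1.0, -1.0],
                     [(0, 0), (1, 1)], [-1.0, 3.0], thresh=0.5)
assert ix == [(1, 1)] and val == [2.0]   # the zero sum at (0, 0) is pruned
```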
} op { name: "SparseApplyAdadelta" input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum_update" description: ": Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "var: Should be from a Variable()." } op { name: "SparseApplyAdagrad" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme." description: "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))" } op { name: "SparseApplyAdagradDA" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." 
type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." type: DT_INT64 } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme." } op { name: "SparseApplyCenteredRMSProp" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mg" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "ms" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mom" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the centered RMSProp algorithm." description: "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. 
This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom" } op { name: "SparseApplyFtrl" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "linear" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme." description: "That is for rows we have grad for, we update var, accum and linear as follows:\naccum_new = accum + grad * grad\nlinear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new" } op { name: "SparseApplyFtrlV2" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "linear" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 shrinkage regularization. Must be a scalar."
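The Ftrl-proximal formulas listed above for `SparseApplyFtrl` map one-to-one onto NumPy for a single updated row; `lr_power` is typically negative (e.g. -0.5). An illustrative per-row sketch, not the TF kernel:

```
import numpy as np

# Direct transcription of the documented Ftrl-proximal row update.
def ftrl_row(var, accum, linear, grad, lr, l1, l2, lr_power):
    accum_new = accum + grad * grad
    linear = linear + grad + (accum_new ** -lr_power - accum ** -lr_power) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    return var, accum_new, linear

v, a, l = ftrl_row(np.array([0.5]), np.array([0.1]), np.array([0.0]),
                   grad=np.array([0.2]), lr=0.05, l1=0.0, l2=0.0, lr_power=-0.5)
assert v[0] < 0.0   # a positive gradient pushes the weight down
```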
type_attr: "T" } input_arg { name: "l2_shrinkage" type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme." description: "That is for rows we have grad for, we update var, accum and linear as follows:\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage +\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new" } op { name: "SparseApplyMomentum" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." } summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme." description: "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum + grad\nvar -= lr * accum" } op { name: "SparseApplyProximalAdagrad" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." 
type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm." description: "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nprox_v = var\nprox_v -= lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}" } op { name: "SparseApplyProximalGradientDescent" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate." description: "That is for rows we have grad for, we update var as follows:\nprox_v = var - alpha * grad\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}" } op { name: "SparseApplyRMSProp" input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "ms" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mom" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." 
type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } summary: "Update \'*var\' according to the RMSProp algorithm." description: "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom" } op { name: "SparseConcat" input_arg { name: "indices" description: "2-D. Indices of each input `SparseTensor`." type: DT_INT64 number_attr: "N" } input_arg { name: "values" description: "1-D. Non-empty values of each `SparseTensor`." type_attr: "T" number_attr: "N" } input_arg { name: "shapes" description: "1-D. Shapes of each `SparseTensor`." type: DT_INT64 number_attr: "N" } output_arg { name: "output_indices" description: "2-D. Indices of the concatenated `SparseTensor`." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. Non-empty values of the concatenated `SparseTensor`." type_attr: "T" } output_arg { name: "output_shape" description: "1-D. Shape of the concatenated `SparseTensor`." type: DT_INT64 } attr { name: "concat_dim" type: "int" description: "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`." } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } summary: "Concatenates a list of `SparseTensor` along the specified dimension." description: "Concatenation is with respect to the dense versions of these sparse tensors.\nIt is assumed that each input is a `SparseTensor` whose elements are ordered\nalong increasing dimension number.\n\nAll inputs\' shapes must match, except for the concat dimension. The\n`indices`, `values`, and `shapes` lists must have the same length.\n\nThe output shape is identical to the inputs\', except along the concat\ndimension, where it is the sum of the inputs\' sizes along that dimension.\n\nThe output elements will be resorted to preserve the sort order along\nincreasing dimension number.\n\nThis op runs in `O(M log M)` time, where `M` is the total number of non-empty\nvalues across all inputs. 
This is due to the need for an internal sort in\norder to concatenate efficiently across an arbitrary dimension.\n\nFor example, if `concat_dim = 1` and the inputs are\n\n sp_inputs[0]: shape = [2, 3]\n [0, 2]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n sp_inputs[1]: shape = [2, 4]\n [0, 1]: \"d\"\n [0, 2]: \"e\"\n\nthen the output will be\n\n shape = [2, 7]\n [0, 2]: \"a\"\n [0, 4]: \"d\"\n [0, 5]: \"e\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\nGraphically this is equivalent to doing\n\n [ a] concat [ d e ] = [ a d e ]\n [b c ] [ ] [b c ]" } op { name: "SparseConditionalAccumulator" output_arg { name: "handle" description: "The handle to the accumulator." type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" description: "The type of the value being accumulated." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "shape" type: "shape" description: "The shape of the values." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this accumulator will be shared under the given name\nacross multiple sessions." } summary: "A conditional accumulator for aggregating sparse gradients." description: "The accumulator accepts gradients marked with local_step greater than or\nequal to the most recent global_step known to the accumulator. The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator." is_stateful: true } op { name: "SparseCross" input_arg { name: "indices" description: "2-D. Indices of each input `SparseTensor`." type: DT_INT64 number_attr: "N" } input_arg { name: "values" description: "1-D. values of each `SparseTensor`." type_list_attr: "sparse_types" } input_arg { name: "shapes" description: "1-D. Shapes of each `SparseTensor`." type: DT_INT64 number_attr: "N" } input_arg { name: "dense_inputs" description: "2-D. Columns represented by dense `Tensor`." type_list_attr: "dense_types" } output_arg { name: "output_indices" description: "2-D. Indices of the concatenated `SparseTensor`." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`." type_attr: "out_type" } output_arg { name: "output_shape" description: "1-D. Shape of the concatenated `SparseTensor`." type: DT_INT64 } attr { name: "N" type: "int" has_minimum: true } attr { name: "hashed_output" type: "bool" description: "If true, returns the hash of the cross instead of the string.\nThis will allow us to avoid string manipulations." } attr { name: "num_buckets" type: "int" description: "It is used if hashed_output is true.\noutput = hashed_value%num_buckets if num_buckets > 0 else hashed_value." has_minimum: true } attr { name: "hash_key" type: "int" description: "Specify the hash_key that will be used by the `FingerprintCat64`\nfunction to combine the crosses fingerprints."
} attr { name: "sparse_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_INT64 type: DT_STRING } } } attr { name: "dense_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_INT64 type: DT_STRING } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_INT64 type: DT_STRING } } } attr { name: "internal_type" type: "type" allowed_values { list { type: DT_INT64 type: DT_STRING } } } summary: "Generates sparse cross from a list of sparse and dense tensors." description: "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))" } op { name: "SparseDenseCwiseAdd" input_arg { name: "sp_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `N` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "dense" description: "`R`-D. The dense Tensor operand." type_attr: "T" } output_arg { name: "output" description: "1-D. The `N` values that are operated on." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Adds up a SparseTensor and a dense Tensor, using these special rules:" description: "(1) Broadcasts the dense side to have the same shape as the sparse side, if\n eligible;\n(2) Then, only the dense values pointed to by the indices of the SparseTensor\n participate in the cwise addition.\n\nBy these rules, the result is a logical SparseTensor with exactly the same\nindices and shape, but possibly with different non-zero values. The output of\nthis Op is the resultant non-zero values." } op { name: "SparseDenseCwiseDiv" input_arg { name: "sp_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `N` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "dense" description: "`R`-D. The dense Tensor operand." type_attr: "T" } output_arg { name: "output" description: "1-D. 
The `N` values that are operated on." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Component-wise divides a SparseTensor by a dense Tensor." description: "*Limitation*: this Op only broadcasts the dense side to the sparse side, but not\nthe other direction." } op { name: "SparseDenseCwiseMul" input_arg { name: "sp_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `N` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "dense" description: "`R`-D. The dense Tensor operand." type_attr: "T" } output_arg { name: "output" description: "1-D. The `N` values that are operated on." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Component-wise multiplies a SparseTensor by a dense Tensor." description: "The output locations corresponding to the implicitly zero elements in the sparse\ntensor will be zero (i.e., will not take up storage space), regardless of the\ncontents of the dense tensor (even if it\'s +/-INF, despite the fact that INF*0 == NaN).\n\n*Limitation*: this Op only broadcasts the dense side to the sparse side, but not\nthe other direction." } op { name: "SparseFillEmptyRows" input_arg { name: "indices" description: "2-D. the indices of the sparse tensor." type: DT_INT64 } input_arg { name: "values" description: "1-D. the values of the sparse tensor." type_attr: "T" } input_arg { name: "dense_shape" description: "1-D. the shape of the sparse tensor." type: DT_INT64 } input_arg { name: "default_value" description: "0-D. default value to insert into location `[row, 0, ..., 0]`\n for rows missing from the input sparse tensor." type_attr: "T" } output_arg { name: "output_indices" description: "2-D. the indices of the filled sparse tensor." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. the values of the filled sparse tensor." type_attr: "T" } output_arg { name: "empty_row_indicator" description: "1-D. whether the dense row was missing in the\ninput sparse tensor." type: DT_BOOL } output_arg { name: "reverse_index_map" description: "1-D. a map from the input indices to the output indices." type: DT_INT64 } attr { name: "T" type: "type" } summary: "Fills empty rows in the input 2-D `SparseTensor` with a default value." description: "The input `SparseTensor` is represented via the tuple of inputs\n(`indices`, `values`, `dense_shape`).
The output `SparseTensor` has the\nsame `dense_shape` but with indices `output_indices` and values\n`output_values`.\n\nThis op inserts a single entry for every row that doesn\'t have any values.\nThe index is created as `[row, 0, ..., 0]` and the inserted value\nis `default_value`.\n\nFor example, suppose `sp_input` has shape `[5, 6]` and non-empty values:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n [3, 1]: d\n\nRows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:\n\n [0, 1]: a\n [0, 3]: b\n [1, 0]: default_value\n [2, 0]: c\n [3, 1]: d\n [4, 0]: default_value\n\nThe output `SparseTensor` will be in row-major order and will have the\nsame shape as the input.\n\nThis op also returns an indicator vector shaped `[dense_shape[0]]` such that\n\n empty_row_indicator[i] = True iff row i was an empty row.\n\nAnd a reverse index map vector shaped `[indices.shape[0]]` that is used during\nbackpropagation,\n\n reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]" } op { name: "SparseFillEmptyRowsGrad" input_arg { name: "reverse_index_map" description: "1-D. The reverse index map from SparseFillEmptyRows." type: DT_INT64 } input_arg { name: "grad_values" description: "1-D. The gradients from backprop." type_attr: "T" } output_arg { name: "d_values" description: "1-D. The backprop into values." type_attr: "T" } output_arg { name: "d_default_value" description: "0-D. The backprop into default_value." type_attr: "T" } attr { name: "T" type: "type" } summary: "The gradient of SparseFillEmptyRows." description: "Takes vectors reverse_index_map, shaped `[N]`, and grad_values,\nshaped `[N_full]`, where `N_full >= N` and copies data into either\n`d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and\n`d_default_value` is a scalar.\n\n d_values[j] = grad_values[reverse_index_map[j]]\n d_default_value = sum_{k : 0 .. N_full - 1} (\n grad_values[k] * 1{k not in reverse_index_map})" } op { name: "SparseMatMul" input_arg { name: "a" type_attr: "Ta" } input_arg { name: "b" type_attr: "Tb" } output_arg { name: "product" type: DT_FLOAT } attr { name: "transpose_a" type: "bool" default_value { b: false } } attr { name: "transpose_b" type: "bool" default_value { b: false } } attr { name: "a_is_sparse" type: "bool" default_value { b: false } } attr { name: "b_is_sparse" type: "bool" default_value { b: false } } attr { name: "Ta" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } } } attr { name: "Tb" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } } } summary: "Multiply matrix \"a\" by matrix \"b\"." description: "The inputs must be two-dimensional matrices and the inner dimension of \"a\" must\nmatch the outer dimension of \"b\". This op is optimized for the case where at\nleast one of \"a\" or \"b\" is sparse. The breakeven for using this versus a dense\nmatrix multiply on one platform was 30% zero values in the sparse matrix.\n\nThe gradient computation of this operation will only take advantage of sparsity\nin the input gradient when that gradient comes from a Relu." } op { name: "SparseReduceMax" input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. 
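A NumPy sketch of the `SparseFillEmptyRows` behavior documented above for a 2-D input, reproducing the `[5, 6]` example (helper and variable names illustrative):

```
import numpy as np

# Rows with no values receive a single entry [row, 0] = default_value;
# the output is returned in row-major order with an emptiness indicator.
def fill_empty_rows(indices, values, dense_shape, default_value):
    present = {r for r, _ in indices}
    out_ix, out_val = list(map(tuple, indices)), list(values)
    for r in range(dense_shape[0]):
        if r not in present:
            out_ix.append((r, 0)); out_val.append(default_value)
    order = np.lexsort(np.array(out_ix).T[::-1])       # row-major order
    indicator = [r not in present for r in range(dense_shape[0])]
    return [out_ix[i] for i in order], [out_val[i] for i in order], indicator

ix, val, empty = fill_empty_rows([(0, 1), (0, 3), (2, 0), (3, 1)],
                                 ["a", "b", "c", "d"], (5, 6), "dv")
assert empty == [False, True, False, True, True]
assert ix[2] == (1, 0) and val[2] == "dv"              # inserted default row
```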
Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "reduction_axes" description: "1-D. Length-`K` vector containing the reduction axes." type: DT_INT32 } output_arg { name: "output" description: "`R-K`-D. The reduced Tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes the max of elements across dimensions of a SparseTensor." description: "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`\ninstead of a sparse one.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python." } op { name: "SparseReduceMaxSparse" input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "reduction_axes" description: "1-D. Length-`K` vector containing the reduction axes." type: DT_INT32 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes the max of elements across dimensions of a SparseTensor." description: "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a\nSparseTensor.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python." } op { name: "SparseReduceSum" input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. 
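A quick sketch of `SparseReduceMax` through its assumed TF 1.x wrapper `tf.sparse_reduce_max`, showing the dense result and the `keep_dims` behavior described above:

```python
import tensorflow as tf

# Logical [2, 3] matrix; '?' entries are implicitly zero / missing:
# [[1, ?, 2],
#  [?, 3, ?]]
sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                     values=[1, 2, 3], dense_shape=[2, 3])
tf.sparse_reduce_max(sp, axis=1)                  # => dense [2, 3]
tf.sparse_reduce_max(sp, axis=1, keep_dims=True)  # => dense [[2], [3]]
```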
Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "reduction_axes" description: "1-D. Length-`K` vector containing the reduction axes." type: DT_INT32 } output_arg { name: "output" description: "`R-K`-D. The reduced Tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes the sum of elements across dimensions of a SparseTensor." description: "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`\ninstead of a sparse one.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python." } op { name: "SparseReduceSumSparse" input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "reduction_axes" description: "1-D. Length-`K` vector containing the reduction axes." type: DT_INT32 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Computes the sum of elements across dimensions of a SparseTensor." description: "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a\nSparseTensor.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python." } op { name: "SparseReorder" input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." 
type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. `N` non-empty values corresponding to `output_indices`." type_attr: "T" } attr { name: "T" type: "type" } summary: "Reorders a SparseTensor into the canonical, row-major ordering." description: "Note that by convention, all sparse ops preserve the canonical ordering along\nincreasing dimension number. The only time ordering can be violated is during\nmanual manipulation of the indices and values vectors to add entries.\n\nReordering does not affect the shape of the SparseTensor.\n\nIf the tensor has rank `R` and `N` non-empty values, `input_indices` has\nshape `[N, R]`, input_values has length `N`, and input_shape has length `R`." } op { name: "SparseReshape" input_arg { name: "input_indices" description: "2-D. `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor." type: DT_INT64 } input_arg { name: "input_shape" description: "1-D. `R_in` vector with the input SparseTensor\'s dense shape." type: DT_INT64 } input_arg { name: "new_shape" description: "1-D. `R_out` vector with the requested new dense shape." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor." type: DT_INT64 } output_arg { name: "output_shape" description: "1-D. `R_out` vector with the full dense shape of the output\nSparseTensor. This is the same as `new_shape` but with any -1 dimensions\nfilled in." type: DT_INT64 } summary: "Reshapes a SparseTensor to represent values in a new dense shape." description: "This operation has the same semantics as reshape on the represented dense\ntensor. The `input_indices` are recomputed based on the requested `new_shape`.\n\nIf one component of `new_shape` is the special value -1, the size of that\ndimension is computed so that the total dense size remains constant. At\nmost one component of `new_shape` can be -1. The number of dense elements\nimplied by `new_shape` must be the same as the number of dense elements\noriginally implied by `input_shape`.\n\nReshaping does not affect the order of values in the SparseTensor.\n\nIf the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`\nhas length `R_out`, then `input_indices` has shape `[N, R_in]`,\n`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and\n`output_shape` has length `R_out`." } op { name: "SparseSegmentMean" input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" description: "A 1-D tensor. Has same rank as `segment_ids`." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "A 1-D tensor. Values should be sorted and can be repeated." type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the mean along sparse segments of a tensor." 
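To make the `-1` inference in `SparseReshape` concrete, a minimal sketch under the assumption that the TF 1.x wrapper is `tf.sparse_reshape`:

```python
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2],
                     dense_shape=[2, 3])   # 6 dense elements
out = tf.sparse_reshape(sp, [3, -1])       # -1 is inferred as 2
# out.dense_shape => [3, 2]; indices are recomputed row-major:
# flat positions 0 and 5 become [0, 0] and [2, 1].
```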
description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nLike `SegmentMean`, but `segment_ids` can have rank less than `data`\'s first\ndimension, selecting a subset of dimension 0, specified by `indices`." } op { name: "SparseSegmentMeanGrad" input_arg { name: "grad" description: "gradient propagated to the SparseSegmentMean op." type_attr: "T" } input_arg { name: "indices" description: "indices passed to the corresponding SparseSegmentMean op." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "segment_ids passed to the corresponding SparseSegmentMean op." type: DT_INT32 } input_arg { name: "output_dim0" description: "dimension 0 of \"data\" passed to SparseSegmentMean op." type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes gradients for SparseSegmentMean." description: "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0." } op { name: "SparseSegmentSqrtN" input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" description: "A 1-D tensor. Has same rank as `segment_ids`." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "A 1-D tensor. Values should be sorted and can be repeated." type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the sum along sparse segments of a tensor divided by the sqrt of N." description: "N is the size of the segment being reduced.\n\nRead @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments." } op { name: "SparseSegmentSqrtNGrad" input_arg { name: "grad" description: "gradient propagated to the SparseSegmentSqrtN op." type_attr: "T" } input_arg { name: "indices" description: "indices passed to the corresponding SparseSegmentSqrtN op." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "segment_ids passed to the corresponding SparseSegmentSqrtN op." type: DT_INT32 } input_arg { name: "output_dim0" description: "dimension 0 of \"data\" passed to SparseSegmentSqrtN op." type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes gradients for SparseSegmentSqrtN." description: "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0." } op { name: "SparseSegmentSum" input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" description: "A 1-D tensor. Has same rank as `segment_ids`." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "A 1-D tensor. Values should be sorted and can be repeated." type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the sum along sparse segments of a tensor." description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nLike `SegmentSum`, but `segment_ids` can have rank less than `data`\'s first\ndimension, selecting a subset of dimension 0, specified by `indices`.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n# Select two rows, one segment.\ntf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n# => [[0 0 0 0]]\n\n# Select two rows, two segment.\ntf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n# => [[ 1 2 3 4]\n# [-1 -2 -3 -4]]\n\n# Select all rows, two segments.\ntf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n# => [[0 0 0 0]\n# [5 6 7 8]]\n\n# Which is equivalent to:\ntf.segment_sum(c, tf.constant([0, 0, 1]))\n```" } op { name: "SparseSlice" input_arg { name: "indices" description: "2-D tensor represents the indices of the sparse tensor." type: DT_INT64 } input_arg { name: "values" description: "1-D tensor represents the values of the sparse tensor." type_attr: "T" } input_arg { name: "shape" description: "1-D. tensor represents the shape of the sparse tensor." type: DT_INT64 } input_arg { name: "start" description: "1-D. tensor represents the start of the slice." type: DT_INT64 } input_arg { name: "size" description: "1-D. tensor represents the size of the slice.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors." type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" description: "A list of 1-D tensors represents the values of the output sparse\ntensors." type_attr: "T" } output_arg { name: "output_shape" description: "A list of 1-D tensors represents the shape of the output sparse\ntensors." type: DT_INT64 } attr { name: "T" type: "type" } summary: "Slice a `SparseTensor` based on the `start` and `size`." description: "For example, if the input is\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\nGraphically the output tensors are:\n\n sparse_slice([0, 0], [2, 4]) = shape = [2, 4]\n [ a ]\n [b c ]\n\n sparse_slice([0, 4], [2, 3]) = shape = [2, 3]\n [ d e ]\n [ ]" } op { name: "SparseSoftmax" input_arg { name: "sp_indices" description: "2-D. `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `NNZ` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } output_arg { name: "output" description: "1-D. The `NNZ` values for the result `SparseTensor`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } summary: "Applies softmax to a batched N-D `SparseTensor`." 
description: "The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`\n(where `N >= 2`), and with indices sorted in the canonical lexicographic order.\n\nThis op is equivalent to applying the normal `tf.nn.softmax()` to each innermost\nlogical submatrix with shape `[B, C]`, but with the catch that *the implicitly\nzero elements do not participate*. Specifically, the algorithm is equivalent\nto the following:\n\n (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix\n with shape `[B, C]`, along the size-C dimension;\n (2) Masks out the original implicitly-zero locations;\n (3) Renormalizes the remaining elements.\n\nHence, the `SparseTensor` result has exactly the same non-zero indices and\nshape." } op { name: "SparseSoftmaxCrossEntropyWithLogits" input_arg { name: "features" description: "batch_size x num_classes matrix" type_attr: "T" } input_arg { name: "labels" description: "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry." type_attr: "Tlabels" } output_arg { name: "loss" description: "Per example loss (batch_size vector)." type_attr: "T" } output_arg { name: "backprop" description: "backpropagated gradients (batch_size x num_classes matrix)." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tlabels" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes softmax cross entropy cost and gradients to backpropagate." description: "Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept\na matrix of label probabilities, but rather a single label per row\nof features. This label is considered to have probability 1.0 for the\ngiven row.\n\nInputs are the logits, not probabilities." } op { name: "SparseSparseMaximum" input_arg { name: "a_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. `N` non-empty values corresponding to `a_indices`." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "b_indices" description: "counterpart to `a_indices` for the other operand." type: DT_INT64 } input_arg { name: "b_values" description: "counterpart to `a_values` for the other operand; must be of the same dtype." type_attr: "T" } input_arg { name: "b_shape" description: "counterpart to `a_shape` for the other operand; the two shapes must be equal." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. The indices of the output SparseTensor." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. The values of the output SparseTensor." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Returns the element-wise max of two SparseTensors." description: "Assumes the two SparseTensors have the same shape, i.e., no broadcasting." } op { name: "SparseSparseMinimum" input_arg { name: "a_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. 
`N` non-empty values corresponding to `a_indices`." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "b_indices" description: "counterpart to `a_indices` for the other operand." type: DT_INT64 } input_arg { name: "b_values" description: "counterpart to `a_values` for the other operand; must be of the same dtype." type_attr: "T" } input_arg { name: "b_shape" description: "counterpart to `a_shape` for the other operand; the two shapes must be equal." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. The indices of the output SparseTensor." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. The values of the output SparseTensor." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Returns the element-wise min of two SparseTensors." description: "Assumes the two SparseTensors have the same shape, i.e., no broadcasting." } op { name: "SparseSplit" input_arg { name: "split_dim" description: "0-D. The dimension along which to split. Must be in the range\n`[0, rank(shape))`." type: DT_INT64 } input_arg { name: "indices" description: "2-D tensor representing the indices of the sparse tensor." type: DT_INT64 } input_arg { name: "values" description: "1-D tensor representing the values of the sparse tensor." type_attr: "T" } input_arg { name: "shape" description: "1-D tensor representing the shape of the sparse tensor." type: DT_INT64 } output_arg { name: "output_indices" description: "A list of 1-D tensors representing the indices of the output\nsparse tensors." type: DT_INT64 number_attr: "num_split" } output_arg { name: "output_values" description: "A list of 1-D tensors representing the values of the output sparse\ntensors." type_attr: "T" number_attr: "num_split" } output_arg { name: "output_shape" description: "A list of 1-D tensors representing the shape of the output sparse\ntensors." type: DT_INT64 number_attr: "num_split" } attr { name: "num_split" type: "int" description: "The number of ways to split." has_minimum: true minimum: 1 } attr { name: "T" type: "type" } summary: "Split a `SparseTensor` into `num_split` tensors along one dimension." description: "If `shape[split_dim]` is not an integer multiple of `num_split`, slices\n`[0 : shape[split_dim] % num_split]` each get one extra unit along the split\ndimension.\nFor example, if `split_dim = 1` and `num_split = 2` and the input is\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\nGraphically the output tensors are:\n\n output_tensor[0] = shape = [2, 4]\n [ a ]\n [b c ]\n\n output_tensor[1] = shape = [2, 3]\n [ d e ]\n [ ]" } op { name: "SparseTensorDenseAdd" input_arg { name: "a_indices" description: "2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`." type_attr: "Tindices" } input_arg { name: "a_values" description: "1-D. The `values` of the `SparseTensor`, with shape `[nnz]`." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`." type_attr: "Tindices" } input_arg { name: "b" description: "`ndims`-D Tensor. With shape `a_shape`." 
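A sketch of the uneven-split rule for `SparseSplit` (slices `[0 : shape[split_dim] % num_split]` get the extra unit), assuming the keyword-style TF 1.x wrapper `tf.sparse_split`:

```python
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 1], [0, 4], [0, 5], [1, 0], [1, 2]],
                     values=[b'a', b'd', b'e', b'b', b'c'],
                     dense_shape=[2, 7])
left, right = tf.sparse_split(sp_input=sp, num_split=2, axis=1)
# 7 % 2 == 1, so the first slice gets the extra column:
# left.dense_shape  => [2, 4]
# right.dense_shape => [2, 3]
```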
type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`." description: "This Op does not require `a_indices` be sorted in standard lexicographic order." } op { name: "SparseTensorDenseMatMul" input_arg { name: "a_indices" description: "2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix." type_attr: "Tindices" } input_arg { name: "a_values" description: "1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. The `shape` of the `SparseTensor`, size `[2]` Vector." type: DT_INT64 } input_arg { name: "b" description: "2-D. A dense Matrix." type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "adjoint_a" type: "bool" default_value { b: false } description: "Use the adjoint of A in the matrix multiply. If A is complex, this\nis transpose(conj(A)). Otherwise it\'s transpose(A)." } attr { name: "adjoint_b" type: "bool" default_value { b: false } description: "Use the adjoint of B in the matrix multiply. If B is complex, this\nis transpose(conj(B)). Otherwise it\'s transpose(B)." } summary: "Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\"." description: "No validity checking is performed on the indices of A. However, the following\ninput format is recommended for optimal behavior:\n\nif adjoint_a == false:\n A should be sorted in lexicographically increasing order. Use SparseReorder\n if you\'re not sure.\nif adjoint_a == true:\n A should be sorted in order of increasing dimension 1 (i.e., \"column major\"\n order instead of \"row major\" order)." } op { name: "SparseTensorSliceDataset" input_arg { name: "indices" type: DT_INT64 } input_arg { name: "values" type_attr: "Tvalues" } input_arg { name: "dense_shape" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Tvalues" type: "type" } summary: "Creates a dataset that splits a SparseTensor into elements row-wise." is_stateful: true } op { name: "SparseToDense" input_arg { name: "sparse_indices" description: "0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed." type_attr: "Tindices" } input_arg { name: "output_shape" description: "1-D. Shape of the dense output tensor." type_attr: "Tindices" } input_arg { name: "sparse_values" description: "1-D. Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices." type_attr: "T" } input_arg { name: "default_value" description: "Scalar value to set for indices not specified in\n`sparse_indices`." type_attr: "T" } output_arg { name: "dense" description: "Dense output tensor of shape `output_shape`." type_attr: "T" } attr { name: "validate_indices" type: "bool" default_value { b: true } description: "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats." 
} attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Converts a sparse representation into a dense tensor." description: "Builds an array `dense` with shape `output_shape` such that\n\n```\n# If sparse_indices is scalar\ndense[i] = (i == sparse_indices ? sparse_values : default_value)\n\n# If sparse_indices is a vector, then for each i\ndense[sparse_indices[i]] = sparse_values[i]\n\n# If sparse_indices is an n by d matrix, then for each i in [0, n)\ndense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]\n```\n\nAll other values in `dense` are set to `default_value`. If `sparse_values` is a\nscalar, all sparse indices are set to this single value.\n\nIndices should be sorted in lexicographic order, and indices must not\ncontain any repeats. If `validate_indices` is true, these properties\nare checked during execution." } op { name: "SparseToSparseSetOperation" input_arg { name: "set1_indices" description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder." type: DT_INT64 } input_arg { name: "set1_values" description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder." type_attr: "T" } input_arg { name: "set1_shape" description: "1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must\nbe the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the\nmax set size across `0...n-1` dimensions." type: DT_INT64 } input_arg { name: "set2_indices" description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder." type: DT_INT64 } input_arg { name: "set2_values" description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder." type_attr: "T" } input_arg { name: "set2_shape" description: "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the\nmax set size across `0...n-1` dimensions." type: DT_INT64 } output_arg { name: "result_indices" description: "2D indices of a `SparseTensor`." type: DT_INT64 } output_arg { name: "result_values" description: "1D values of a `SparseTensor`." type_attr: "T" } output_arg { name: "result_shape" description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions." type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } summary: "Applies set operation along last dimension of 2 `SparseTensor` inputs." description: "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nIf `validate_indices` is `True`, `SparseToSparseSetOperation` validates the\norder and range of `set1` and `set2` indices.\n\nInput `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,\nand `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same\nas `set2`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nInput `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\nand `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\nas `set1`. 
Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `n`th\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`." } op { name: "Split" input_arg { name: "split_dim" description: "0-D. The dimension along which to split. Must be in the range\n`[-rank(value), rank(value))`." type: DT_INT32 } input_arg { name: "value" description: "The tensor to split." type_attr: "T" } output_arg { name: "output" description: "Identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`value.shape[split_dim] / num_split`." type_attr: "T" number_attr: "num_split" } attr { name: "num_split" type: "int" description: "The number of ways to split. Must evenly divide\n`value.shape[split_dim]`." has_minimum: true minimum: 1 } attr { name: "T" type: "type" } summary: "Splits a tensor into `num_split` tensors along one dimension." } op { name: "SplitV" input_arg { name: "value" description: "The tensor to split." type_attr: "T" } input_arg { name: "size_splits" description: "list containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred." type_attr: "Tlen" } input_arg { name: "split_dim" description: "0-D. The dimension along which to split. Must be in the range\n`[-rank(value), rank(value))`." type: DT_INT32 } output_arg { name: "output" description: "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`." type_attr: "T" number_attr: "num_split" } attr { name: "num_split" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "Tlen" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Splits a tensor into `num_split` tensors along one dimension." } op { name: "SqlDataset" input_arg { name: "driver_name" description: "The database type. Currently, the only supported type is \'sqlite\'." type: DT_STRING } input_arg { name: "data_source_name" description: "A connection string to connect to the database." type: DT_STRING } input_arg { name: "query" description: "A SQL query to execute." type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that executes a SQL query and emits rows of the result set." is_stateful: true } op { name: "Sqrt" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes square root of x element-wise." description: "I.e., \\\\(y = \\sqrt{x} = x^{1/2}\\\\)." 
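`Split` and `SplitV` surface through a single Python wrapper in TF 1.x (`tf.split`; treating that mapping as an assumption of this note): an integer count selects `Split`, an explicit size list selects `SplitV`:

```python
import tensorflow as tf

value = tf.ones([5, 30])
# Split: num_split must evenly divide the dimension.
s0, s1, s2 = tf.split(value, num_or_size_splits=3, axis=1)         # three [5, 10]
# SplitV: explicit sizes; the single -1 entry is inferred (30-4-15 = 11).
a, b, c = tf.split(value, num_or_size_splits=[4, 15, -1], axis=1)
```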
} op { name: "SqrtGrad" input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the gradient for the sqrt of `x` wrt its input." description: "Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`\nis the corresponding input gradient." } op { name: "Square" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes square of x element-wise." description: "I.e., \\\\(y = x * x = x^2\\\\)." } op { name: "SquaredDifference" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns (x - y)(x - y) element-wise." description: "*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" is_commutative: true } op { name: "Squeeze" input_arg { name: "input" description: "The `input` to squeeze." type_attr: "T" } output_arg { name: "output" description: "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "squeeze_dims" type: "list(int)" default_value { list { } } description: "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1. Must\nbe in the range `[-rank(input), rank(input))`." has_minimum: true } summary: "Removes dimensions of size 1 from the shape of a tensor." description: "Given a tensor `input`, this operation returns a tensor of the same type with\nall dimensions of size 1 removed. If you don\'t want to remove all size 1\ndimensions, you can remove specific size 1 dimensions by specifying\n`squeeze_dims`.\n\nFor example:\n\n```\n# \'t\' is a tensor of shape [1, 2, 1, 3, 1, 1]\nshape(squeeze(t)) ==> [2, 3]\n```\n\nOr, to remove specific size 1 dimensions:\n\n```\n# \'t\' is a tensor of shape [1, 2, 1, 3, 1, 1]\nshape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]\n```" } op { name: "Stack" output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "elem_type" type: "type" } attr { name: "stack_name" type: "string" default_value { s: "" } } summary: "Deprecated, use StackV2." is_stateful: true } op { name: "StackClose" input_arg { name: "handle" type: DT_STRING is_ref: true } summary: "Deprecated, use StackCloseV2." } op { name: "StackCloseV2" input_arg { name: "handle" description: "The handle to a stack." type: DT_RESOURCE } summary: "Delete the stack from its resource container." is_stateful: true } op { name: "StackPop" input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "elem" type_attr: "elem_type" } attr { name: "elem_type" type: "type" } summary: "Deprecated, use StackPopV2." } op { name: "StackPopV2" input_arg { name: "handle" description: "The handle to a stack." type: DT_RESOURCE } output_arg { name: "elem" description: "The tensor that is popped from the top of the stack." 
type_attr: "elem_type" } attr { name: "elem_type" type: "type" description: "The type of the elem that is popped." } summary: "Pop the element at the top of the stack." is_stateful: true } op { name: "StackPush" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "elem" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "swap_memory" type: "bool" default_value { b: false } } summary: "Deprecated, use StackPushV2." } op { name: "StackPushV2" input_arg { name: "handle" description: "The handle to a stack." type: DT_RESOURCE } input_arg { name: "elem" description: "The tensor to be pushed onto the stack." type_attr: "T" } output_arg { name: "output" description: "The same tensor as the input \'elem\'." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "swap_memory" type: "bool" default_value { b: false } description: "Swap `elem` to CPU. Default to false." } summary: "Push an element onto the stack." is_stateful: true } op { name: "StackV2" input_arg { name: "max_size" description: "The maximum size of the stack if non-negative. If negative, the stack\nsize is unlimited." type: DT_INT32 } output_arg { name: "handle" description: "The handle to the stack." type: DT_RESOURCE } attr { name: "elem_type" type: "type" description: "The type of the elements on the stack." } attr { name: "stack_name" type: "string" default_value { s: "" } description: "Overrides the name used for the temporary stack resource. Default\nvalue is the name of the \'Stack\' op (which is guaranteed unique)." } summary: "A stack that produces elements in first-in last-out order." is_stateful: true } op { name: "Stage" input_arg { name: "values" description: "a list of tensors\ndtypes A list of data types that inserted values should adhere to." type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } description: "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached." has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } description: "The maximum number of bytes allowed for Tensors in the Staging Area.\nIf > 0, inserts will block until sufficient space is available." has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "It is necessary to match this name to the matching Unstage Op." } summary: "Stage values similar to a lightweight Enqueue." description: "The basic functionality of this Op is similar to a queue with many\nfewer capabilities and options. This Op is optimized for performance." is_stateful: true } op { name: "StageClear" attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op removes all elements in the underlying container." 
is_stateful: true } op { name: "StagePeek" input_arg { name: "index" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op peeks at the values at the specified index." description: "If the underlying container does not contain sufficient elements\nthis op will block until it does. This Op is optimized for\nperformance." is_stateful: true } op { name: "StageSize" output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } summary: "Op returns the number of elements in the underlying container." is_stateful: true } op { name: "StatelessRandomNormal" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } input_arg { name: "seed" description: "2 seeds (shape [2])." type: DT_INT64 } output_arg { name: "output" description: "Random values with specified shape." type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs deterministic pseudorandom values from a normal distribution." description: "The generated values will have mean 0 and standard deviation 1.\n\nThe outputs are a deterministic function of `shape` and `seed`." } op { name: "StatelessRandomUniform" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } input_arg { name: "seed" description: "2 seeds (shape [2])." type: DT_INT64 } output_arg { name: "output" description: "Random values with specified shape." type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs deterministic pseudorandom values from a uniform distribution." description: "The generated values follow a uniform distribution in the range `[0, 1)`. The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.\n\nThe outputs are a deterministic function of `shape` and `seed`." } op { name: "StatelessTruncatedNormal" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } input_arg { name: "seed" description: "2 seeds (shape [2])." type: DT_INT64 } output_arg { name: "output" description: "Random values with specified shape." type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } description: "The type of the output." 
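The determinism promised by the `StatelessRandom*` ops is their whole point; a sketch using `tf.random.stateless_uniform` (the wrapper's module path varies by release, e.g. `tf.contrib.stateless` in early 1.x, so treat the exact name as an assumption):

```python
import tensorflow as tf

seed = tf.constant([42, 7], dtype=tf.int64)         # 2 seeds, shape [2]
a = tf.random.stateless_uniform([2, 3], seed=seed)
b = tf.random.stateless_uniform([2, 3], seed=seed)
# a == b element-wise: the output is a pure function of shape and seed,
# unlike the stateful RandomUniform op.
```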
allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs deterministic pseudorandom values from a truncated normal distribution." description: "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.\n\nThe outputs are a deterministic function of `shape` and `seed`." } op { name: "StopGradient" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } summary: "Stops gradient computation." description: "When executed in a graph, this op outputs its input tensor as-is.\n\nWhen building ops to compute gradients, this op prevents the contribution of\nits inputs from being taken into account. Normally, the gradient generator adds ops\nto a graph to compute the derivatives of a specified \'loss\' by recursively\nfinding out inputs that contributed to its computation. If you insert this op\nin the graph, its inputs are masked from the gradient generator. They are not\ntaken into account for computing gradients.\n\nThis is useful any time you want to compute a value with TensorFlow but need\nto pretend that the value was a constant. Some examples include:\n\n* The *EM* algorithm where the *M-step* should not involve backpropagation\n through the output of the *E-step*.\n* Contrastive divergence training of Boltzmann machines where, when\n differentiating the energy function, the training must not backpropagate\n through the graph that generated the samples from the model.\n* Adversarial training, where no backprop should happen through the adversarial\n example generation process." } op { name: "StridedSlice" input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" description: "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` is set then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values cause indexing\nto start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`." type_attr: "Index" } input_arg { name: "end" description: "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges." type_attr: "Index" } input_arg { name: "strides" description: "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. Out-of-range values are\nclamped to `[0,dim[i])` if `slice[i] > 0` or `[-1,dim[i]-1]` if `slice[i] < 0`." type_attr: "Index" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } description: "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. 
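A compact sketch of the `StopGradient` behavior described above, using the standard `tf.stop_gradient` with graph-mode `tf.gradients`:

```python
import tensorflow as tf

x = tf.constant(3.0)
y = tf.square(x)                     # dy/dx = 2x = 6
z = tf.square(tf.stop_gradient(x))   # x is treated as a constant here
grad = tf.gradients(y + z, [x])      # => [6.0], not [12.0]:
# z contributes nothing to the gradient even though it depends on x.
```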
At runtime\n`begin[i]` will be replaced with `[0, n-1)` if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`" } attr { name: "end_mask" type: "int" default_value { i: 0 } description: "analogous to `begin_mask`" } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } description: "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`." } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } description: "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor." } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } description: "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2." } summary: "Return a strided slice from `input`." description: "Note, most python users will want to use the Python `Tensor.__getitem__`\nor `Variable.__getitem__` rather than this op directly.\n\nThe goal of this op is to produce a new tensor with a subset of\nthe elements from the `n` dimensional `input` tensor. The subset is chosen using\na sequence of `m` sparse range specifications encoded into the arguments\nof this function. Note, in some cases\n`m` could be equal to `n`, but this need not be the case. Each\nrange specification entry can be one of the following:\n\n- An ellipsis (...). Ellipses are used to imply zero or more\n dimensions of full-dimension selection and are produced using\n `ellipsis_mask`. For example, `foo[...]` is the identity slice.\n\n- A new axis. This is used to insert a new shape=1 dimension and is\n produced using `new_axis_mask`. For example, `foo[tf.newaxis, :, :]` where\n `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.\n\n- A range `begin:end:stride`. This is used to specify how much to choose from\n a given dimension. `stride` can be any integer but 0. `begin` is an integer\n which represents the index of the first value to select while `end` represents\n the index at which to stop selecting (exclusive). The number of values selected in each\n dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.\n `begin` and `end` can be negative where `-1` is the last element, `-2` is\n the second to last. `begin_mask` controls whether to replace the explicitly\n given `begin` with an implicit effective value of `0` if `stride > 0` and\n `-1` if `stride < 0`. `end_mask` is analogous but produces the number\n required to create the largest open interval. For example, given a shape\n `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do\n not assume this is equivalent to `foo[0:-1]` which has an effective `begin`\n and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the\n first dimension of a tensor while dropping the last two elements (in the\n original order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.\n\n- A single index. 
This is used to keep only elements that have a given\n index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a\n shape `(6,)` tensor. This is encoded in `begin` and `end` and\n `shrink_axis_mask`.\n\nEach conceptual range specification is encoded in the op\'s argument. This\nencoding is best understood by considering a non-trivial example. In\nparticular,\n`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as\n\n```\nbegin = [1, 2, x, x, 0, x] # x denotes don\'t care (usually 0)\nend = [2, 4, x, x, -3, x]\nstrides = [1, 1, x, x, -1, 1]\nbegin_mask = 1<<4 | 1 << 5 = 48\nend_mask = 1<<5 = 32\nellipsis_mask = 1<<3 = 8\nnew_axis_mask = 1<<2 = 4\nshrink_axis_mask = 1<<0 = 1\n```\n\nIn this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of\nthe slice becomes (2, 1, 5, 5, 2, 5).\nLet us walk step by step through each argument specification.\n\n1. The first argument in the example slice is turned into `begin = 1` and\n`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we\nalso set the appropriate bit in `shrink_axis_mask`.\n\n2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have\nzero bits contributed.\n\n3. None is a synonym for `tf.newaxis`. This means a size-1 dimension is\ninserted in the final shape. Dummy values are contributed to begin,\nend and stride, while the new_axis_mask bit is set.\n\n4. `...` grabs the full ranges from as many dimensions as needed to\nfully specify a slice for every dimension of the input shape.\n\n5. `:-3:-1` shows the use of negative indices. A negative index `i` associated\nwith a dimension that has shape `s` is converted to a positive index\n`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion\nis done internally so begin, end and strides receive x, -3, and -1.\nThe appropriate begin_mask bit is set to indicate the start range is the\nfull range (ignoring the x).\n\n6. `:` indicates that the entire contents of the corresponding dimension\nare selected. This is equivalent to `::` or `0::1`. begin, end, and strides\nreceive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and\n`end_mask` are also set.\n\n*Requirements*:\n `0 != strides[i] for i in [0, m)`\n `ellipsis_mask must be a power of two (only one ellipsis)`" } op { name: "StridedSliceAssign" input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "value" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } summary: "Assign `value` to the sliced l-value reference of `ref`." description: "The values of `value` are assigned to the positions in the variable\n`ref` that are selected by the slice parameters. The slice parameters\n`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`\'s\nshape must be exactly the shape produced by the slice of `ref`." 
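The mask encoding walked through above is normally produced by `Tensor.__getitem__`, but it can also be written out by hand; a sketch with `tf.strided_slice`, where both forms should agree under standard Python slicing semantics:

```python
import tensorflow as tf

foo = tf.reshape(tf.range(25), [5, 5])
# Python sugar:
a = foo[1, 2:4]
# Explicit encoding: begin/end/strides per dimension, with bit 0 of
# shrink_axis_mask set because the first spec is a single index.
b = tf.strided_slice(foo, begin=[1, 2], end=[2, 4], strides=[1, 1],
                     shrink_axis_mask=1)
# a and b are both shape (2,) containing [7, 8].
```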
} op { name: "StridedSliceGrad" input_arg { name: "shape" type_attr: "Index" } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } summary: "Returns the gradient of `StridedSlice`." description: "Since `StridedSlice` cuts out pieces of its `input` which is size\n`shape`, its gradient will have the same shape (which is passed here\nas `shape`). The gradient will be zero in any element that the slice\ndoes not select.\n\nArguments are the same as StridedSliceGrad with the exception that\n`dy` is the input gradient to be propagated and `shape` is the\nshape of `StridedSlice`\'s `input`." } op { name: "StringJoin" input_arg { name: "inputs" description: "A list of string tensors. The tensors must all have the same shape,\nor be scalars. Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs." type: DT_STRING number_attr: "N" } output_arg { name: "output" type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "separator" type: "string" default_value { s: "" } description: "string, an optional join separator." } summary: "Joins the strings in the given list of string tensors into one tensor;" description: "with the given separator (default is an empty separator)." } op { name: "StringSplit" input_arg { name: "input" description: "1-D. Strings to split." type: DT_STRING } input_arg { name: "delimiter" description: "0-D. Delimiter characters (bytes), or empty string." type: DT_STRING } output_arg { name: "indices" description: "A dense matrix of int64 representing the indices of the sparse tensor." type: DT_INT64 } output_arg { name: "values" description: "A vector of strings corresponding to the splited values." type: DT_STRING } output_arg { name: "shape" description: "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry." type: DT_INT64 } attr { name: "skip_empty" type: "bool" default_value { b: true } description: "A `bool`. If `True`, skip the empty strings from the result." } summary: "Split elements of `input` based on `delimiter` into a `SparseTensor`." description: "Let N be the size of source (typically N will be the batch size). Split each\nelement of `input` based on `delimiter` and return a `SparseTensor`\ncontaining the splitted tokens. Empty tokens are ignored.\n\n`delimiter` can be empty, or a string of split characters. If `delimiter` is an\n empty string, each element of `input` is split into individual single-byte\n character strings, including splitting of UTF-8 multibyte sequences. 
Otherwise\n every character of `delimiter` is a potential split point.\n\nFor example:\n N = 2, input[0] is \'hello world\' and input[1] is \'a b c\', then the output\n will be\n\n indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n shape = [2, 3]\n values = [\'hello\', \'world\', \'a\', \'b\', \'c\']" } op { name: "StringToHashBucket" input_arg { name: "string_tensor" type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input `string_tensor`." type: DT_INT64 } attr { name: "num_buckets" type: "int" description: "The number of buckets." has_minimum: true minimum: 1 } summary: "Converts each string in the input Tensor to its hash modulo the number of buckets." description: "The hash function is deterministic on the content of the string within the\nprocess.\n\nNote that the hash function may change from time to time.\nThis functionality will be deprecated and it\'s recommended to use\n`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`." } op { name: "StringToHashBucketFast" input_arg { name: "input" description: "The strings to assign a hash bucket." type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input." type: DT_INT64 } attr { name: "num_buckets" type: "int" description: "The number of buckets." has_minimum: true minimum: 1 } summary: "Converts each string in the input Tensor to its hash modulo the number of buckets." description: "The hash function is deterministic on the content of the string within the\nprocess and will never change. However, it is not suitable for cryptography.\nThis function may be used when CPU time is scarce and inputs are trusted or\nunimportant. There is a risk of adversaries constructing inputs that all hash\nto the same bucket. To prevent this problem, use a strong hash function with\n`tf.string_to_hash_bucket_strong`." } op { name: "StringToHashBucketStrong" input_arg { name: "input" description: "The strings to assign a hash bucket." type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input." type: DT_INT64 } attr { name: "num_buckets" type: "int" description: "The number of buckets." has_minimum: true minimum: 1 } attr { name: "key" type: "list(int)" description: "The key for the keyed hash function passed as a list of two uint64\nelements." } summary: "Converts each string in the input Tensor to its hash modulo the number of buckets." description: "The hash function is deterministic on the content of the string within the\nprocess. The hash function is a keyed hash function, where attribute `key`\ndefines the key of the hash function. `key` is an array of 2 elements.\n\nA strong hash is important when inputs may be malicious, e.g. URLs with\nadditional components. Adversaries could try to make their inputs hash to the\nsame bucket for a denial-of-service attack or to skew the results. A strong\nhash prevents this by making it difficult, if not infeasible, to compute inputs\nthat hash to the same bucket. This comes at a cost of roughly 4x higher compute\ntime than `tf.string_to_hash_bucket_fast`." } op { name: "StringToNumber" input_arg { name: "string_tensor" type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input `string_tensor`." type_attr: "out_type" } attr { name: "out_type" type: "type" default_value { type: DT_FLOAT } description: "The numeric type to interpret each string in `string_tensor` as." 
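To contrast the two non-deprecated hash-bucket variants in practice, a short sketch using the TF 1.x Python wrappers named in the descriptions above (the `key` values are arbitrary placeholders, not recommended constants):

```python
import tensorflow as tf

features = tf.constant(["hello", "world", "hello"])

# Fast, non-keyed hash: fine when inputs are trusted or unimportant.
fast = tf.string_to_hash_bucket_fast(features, num_buckets=10)

# Keyed hash: robust against adversarial inputs, roughly 4x more compute.
strong = tf.string_to_hash_bucket_strong(
    features, num_buckets=10, key=[0xDECAFC0FFEE, 0xDEADBEEF])

with tf.Session() as sess:
    # Equal strings always land in equal buckets within a process.
    print(sess.run([fast, strong]))
```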
allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } summary: "Converts each string in the input Tensor to the specified numeric type." description: "(Note that int32 overflow results in an error while float overflow\nresults in a rounded value.)" } op { name: "Sub" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns x - y element-wise." description: "*NOTE*: `Sub` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "Substr" input_arg { name: "input" description: "Tensor of strings" type: DT_STRING } input_arg { name: "pos" description: "Scalar defining the position of first character in each substring" type_attr: "T" } input_arg { name: "len" description: "Scalar defining the number of characters to include in each substring" type_attr: "T" } output_arg { name: "output" description: "Tensor of substrings" type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Return substrings from `Tensor` of strings." description: "For each string in the input `Tensor`, creates a substring starting at index\n`pos` with a total length of `len`.\n\nIf `len` defines a substring that would extend beyond the length of the input\nstring, then as many characters as possible are used.\n\nIf `pos` is negative or specifies a character index larger than any of the input\nstrings, then an `InvalidArgumentError` is thrown.\n\n`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on\nOp creation.\n\n*NOTE*: `Substr` supports broadcasting up to two dimensions. More about\nbroadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n---\n\nExamples\n\nUsing scalar `pos` and `len`:\n\n```python\ninput = [b\'Hello\', b\'World\']\nposition = 1\nlength = 3\n\noutput = [b\'ell\', b\'orl\']\n```\n\nUsing `pos` and `len` with same shape as `input`:\n\n```python\ninput = [[b\'ten\', b\'eleven\', b\'twelve\'],\n [b\'thirteen\', b\'fourteen\', b\'fifteen\'],\n [b\'sixteen\', b\'seventeen\', b\'eighteen\']]\nposition = [[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]\nlength = [[2, 3, 4],\n [4, 3, 2],\n [5, 5, 5]]\n\noutput = [[b\'en\', b\'eve\', b\'lve\'],\n [b\'hirt\', b\'urt\', b\'te\'],\n [b\'ixtee\', b\'vente\', b\'hteen\']]\n```\n\nBroadcasting `pos` and `len` onto `input`:\n\n```\ninput = [[b\'ten\', b\'eleven\', b\'twelve\'],\n [b\'thirteen\', b\'fourteen\', b\'fifteen\'],\n [b\'sixteen\', b\'seventeen\', b\'eighteen\'],\n [b\'nineteen\', b\'twenty\', b\'twentyone\']]\nposition = [1, 2, 3]\nlength = [1, 2, 3]\n\noutput = [[b\'e\', b\'ev\', b\'lve\'],\n [b\'h\', b\'ur\', b\'tee\'],\n [b\'i\', b\'ve\', b\'hte\'],\n [b\'i\', b\'en\', b\'nty\']]\n```\n\nBroadcasting `input` onto `pos` and `len`:\n\n```\ninput = b\'thirteen\'\nposition = [1, 5, 7]\nlength = [3, 2, 1]\n\noutput = [b\'hir\', b\'ee\', b\'n\']\n```" } op { name: "Sum" input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`." 
type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Computes the sum of elements across dimensions of a tensor." description: "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1." } op { name: "Svd" input_arg { name: "input" description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`." type_attr: "T" } output_arg { name: "s" description: "Singular values. Shape is `[..., P]`." type_attr: "T" } output_arg { name: "u" description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`. Undefined if `compute_uv` is `False`." type_attr: "T" } output_arg { name: "v" description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.\nUndefined if `compute_uv` is false." type_attr: "T" } attr { name: "compute_uv" type: "bool" default_value { b: true } description: "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced." } attr { name: "full_matrices" type: "bool" default_value { b: false } description: "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the singular value decompositions of one or more matrices." description: "Computes the SVD of each inner matrix in `input` such that\n`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`\n\n```python\n# a is a tensor containing a batch of matrices.\n# s is a tensor of singular values for each matrix.\n# u is the tensor containing of left singular vectors for each matrix.\n# v is the tensor containing of right singular vectors for each matrix.\ns, u, v = svd(a)\ns, _, _ = svd(a, compute_uv=False)\n```" } op { name: "Switch" input_arg { name: "data" description: "The tensor to be forwarded to the appropriate output." type_attr: "T" } input_arg { name: "pred" description: "A scalar that specifies which output port will receive data." type: DT_BOOL } output_arg { name: "output_false" description: "If `pred` is false, data will be forwarded to this output." type_attr: "T" } output_arg { name: "output_true" description: "If `pred` is true, data will be forwarded to this output." type_attr: "T" } attr { name: "T" type: "type" } summary: "Forwards `data` to the output port determined by `pred`." 
description: "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `RefSwitch` and `Merge`." } op { name: "SymbolicGradient" input_arg { name: "input" description: "a list of input tensors of size N + M;" type_list_attr: "Tin" } output_arg { name: "output" description: "a list of output tensors of size N;" type_list_attr: "Tout" } attr { name: "Tin" type: "list(type)" description: "the type list for the input list." has_minimum: true minimum: 1 } attr { name: "Tout" type: "list(type)" description: "the type list for the input list." has_minimum: true minimum: 1 } attr { name: "f" type: "func" description: "The function we want to compute the gradient for.\n\nThe function \'f\' must be a numerical function which takes N inputs and\nproduces M outputs. Its gradient function \'g\', which is computed by\nthis SymbolicGradient op is a function taking N + M inputs and\nproduces N outputs.\n\nI.e. if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\nthen, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\nwhere L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\nloss function). dL/dx_i is the partial derivative of L with respect\nto x_i.\n\n(Needs some math expert to say the comment above better.)" } summary: "Computes the gradient function for function f via backpropagation." } op { name: "TFRecordDataset" input_arg { name: "filenames" description: "A scalar or vector containing the name(s) of the file(s) to be\nread." type: DT_STRING } input_arg { name: "compression_type" description: "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\"." type: DT_STRING } input_arg { name: "buffer_size" description: "A scalar representing the number of bytes to buffer. A value of\n0 means no buffering will be performed." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } summary: "Creates a dataset that emits the records from one or more TFRecord files." is_stateful: true } op { name: "TFRecordReader" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } attr { name: "compression_type" type: "string" default_value { s: "" } } summary: "A Reader that outputs the records from a TensorFlow Records file." is_stateful: true } op { name: "TFRecordReaderV2" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } attr { name: "compression_type" type: "string" default_value { s: "" } } summary: "A Reader that outputs the records from a TensorFlow Records file." 
is_stateful: true } op { name: "TakeDataset" input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "count" description: "A scalar representing the number of elements from the `input_dataset`\nthat should be taken. A value of `-1` indicates that all of `input_dataset`\nis taken." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that contains `count` elements from the `input_dataset`." } op { name: "TakeManySparseFromTensorsMap" input_arg { name: "sparse_handles" description: "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`." type: DT_INT64 } output_arg { name: "sparse_indices" description: "2-D. The `indices` of the minibatch `SparseTensor`." type: DT_INT64 } output_arg { name: "sparse_values" description: "1-D. The `values` of the minibatch `SparseTensor`." type_attr: "dtype" } output_arg { name: "sparse_shape" description: "1-D. The `shape` of the minibatch `SparseTensor`." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`." } attr { name: "container" type: "string" default_value { s: "" } description: "The container name for the `SparseTensorsMap` read by this op." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used." } summary: "Read `SparseTensors` from a `SparseTensorsMap` and concatenate them." description: "The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where\n`N` is the minibatch size and the rows correspond to the output handles of\n`AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the\noriginal `SparseTensor` objects that went into the given input ops must all\nmatch. When the final `SparseTensor` is created, it has rank one\nhigher than the ranks of the incoming `SparseTensor` objects\n(they have been concatenated along a new row dimension on the left).\n\nThe output `SparseTensor` object\'s shape values for all dimensions but the\nfirst are the max across the input `SparseTensor` objects\' shape values\nfor the corresponding dimensions. Its first shape value is `N`, the minibatch\nsize.\n\nThe input `SparseTensor` objects\' indices are assumed ordered in\nstandard lexicographic order. If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the handles represent an input, which is a `[2, 3]` matrix\nrepresenting two original `SparseTensor` objects:\n\n```\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n```\n\nand\n\n```\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n```\n\nthen the final `SparseTensor` will be:\n\n```\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]\n```" is_stateful: true } op { name: "Tan" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes tan of x element-wise." 
} op { name: "Tanh" input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes hyperbolic tangent of `x` element-wise." } op { name: "TanhGrad" input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Computes the gradient for the tanh of `x` wrt its input." description: "Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`\nis the corresponding input gradient." } op { name: "TemporaryVariable" output_arg { name: "ref" description: "A reference to the variable tensor." type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" description: "The shape of the variable tensor." } attr { name: "dtype" type: "type" description: "The type of elements in the variable tensor." } attr { name: "var_name" type: "string" default_value { s: "" } description: "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the \'TemporaryVariable\' op (which is guaranteed unique)." } summary: "Returns a tensor that may be mutated, but only persists within a single step." description: "This is an experimental op for internal use only and it is possible to use this\nop in unsafe ways. DO NOT USE unless you fully understand the risks.\n\nIt is the caller\'s responsibility to ensure that \'ref\' is eventually passed to a\nmatching \'DestroyTemporaryVariable\' op after all other uses have completed.\n\nOutputs a ref to the tensor state so it may be read or modified.\n\n E.g.\n var = state_ops._temporary_variable([1, 2], types.float_)\n var_name = var.op.name\n var = state_ops.assign(var, [[4.0, 5.0]])\n var = state_ops.assign_add(var, [[6.0, 7.0]])\n final = state_ops._destroy_temporary_variable(var, var_name=var_name)" is_stateful: true } op { name: "TensorArray" input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } deprecation { version: 16 explanation: "Use TensorArrayV3" } is_stateful: true } op { name: "TensorArrayClose" input_arg { name: "handle" type: DT_STRING is_ref: true } deprecation { version: 16 explanation: "Use TensorArrayCloseV3" } } op { name: "TensorArrayCloseV2" input_arg { name: "handle" type: DT_STRING } summary: "Deprecated. Use TensorArrayCloseV3" } op { name: "TensorArrayCloseV3" input_arg { name: "handle" description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)." type: DT_RESOURCE } summary: "Delete the TensorArray from its resource container." description: "This enables the user to close and release the resource in the middle\nof a step/run." 
is_stateful: true } op { name: "TensorArrayConcat" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } deprecation { version: 16 explanation: "Use TensorArrayGradV3" } } op { name: "TensorArrayConcatV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } summary: "Deprecated. Use TensorArrayConcatV3" } op { name: "TensorArrayConcatV3" input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "value" description: "All of the elements in the TensorArray, concatenated along the first\naxis." type_attr: "dtype" } output_arg { name: "lengths" description: "A vector of the row sizes of the original T elements in the\nvalue output. In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The type of the elem that is returned." } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } description: "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error." } summary: "Concat the elements from the TensorArray into value `value`." description: "Takes `T` elements of shapes\n\n ```\n (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)\n ```\n\nand concatenates them into a Tensor of shape:\n\n ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```\n\nAll elements must have the same shape (excepting the first dimension)." is_stateful: true } op { name: "TensorArrayGather" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } deprecation { version: 16 explanation: "Use TensorArrayGatherV3" } } op { name: "TensorArrayGatherV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } summary: "Deprecated. Use TensorArrayGatherV3" } op { name: "TensorArrayGatherV3" input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "indices" description: "The locations in the TensorArray from which to read tensor elements." type: DT_INT32 } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." 
type: DT_FLOAT } output_arg { name: "value" description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the elem that is returned." } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error." } summary: "Gather specific elements from the TensorArray into output `value`." description: "All elements selected by `indices` must have the same shape." is_stateful: true } op { name: "TensorArrayGrad" input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_STRING is_ref: true } attr { name: "source" type: "string" } deprecation { version: 16 explanation: "Use TensorArrayGradV3" } is_stateful: true } op { name: "TensorArrayGradV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_STRING } attr { name: "source" type: "string" } summary: "Deprecated. Use TensorArrayGradV3" is_stateful: true } op { name: "TensorArrayGradV3" input_arg { name: "handle" description: "The handle to the forward TensorArray." type: DT_RESOURCE } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_RESOURCE } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "source" type: "string" description: "The gradient source string, used to decide which gradient TensorArray\nto return." } summary: "Creates a TensorArray for storing the gradients of values in the given handle." description: "If the given TensorArray gradient already exists, returns a reference to it.\n\nLocks the size of the original TensorArray by disabling its dynamic size flag.\n\n**A note about the input flow_in:**\n\nThe handle flow_in forces the execution of the gradient lookup to occur\nonly after certain other operations have occurred. For example, when\nthe forward TensorArray is dynamically sized, writes to this TensorArray\nmay resize the object. The gradient TensorArray is statically sized based\non the size of the forward TensorArray when this operation executes.\nFurthermore, the size of the forward TensorArray is frozen by this call.\nAs a result, the flow is used to ensure that the call to generate the gradient\nTensorArray only happens after all writes are executed.\n\nIn the case of dynamically sized TensorArrays, gradient computation should\nonly be performed on read operations that have themselves been chained via\nflow to occur only after all writes have executed. That way the final size\nof the forward TensorArray is known when this operation is called.\n\n**A note about the source attribute:**\n\nTensorArray gradient calls use an accumulator TensorArray object. If\nmultiple gradients are calculated and run in the same session, the multiple\ngradient nodes may accidentally flow through the same accumulator TensorArray.\nThis double counts and generally breaks the TensorArray gradient flow.\n\nThe solution is to identify which gradient call this particular\nTensorArray gradient is being called in. This is performed by identifying\na unique string (e.g. \"gradients\", \"gradients_1\", ...) 
from the input\ngradient Tensor\'s name. This string is used as a suffix when creating\nthe TensorArray gradient object here (the attribute `source`).\n\nThe attribute `source` is added as a suffix to the forward TensorArray\'s\nname when performing the creation / lookup, so that each separate gradient\ncalculation gets its own TensorArray accumulator." is_stateful: true } op { name: "TensorArrayPack" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } deprecation { version: 16 explanation: "Use TensorArrayGatherV3 with RangeOp" } } op { name: "TensorArrayRead" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } deprecation { version: 16 explanation: "Use TensorArrayReadV3" } } op { name: "TensorArrayReadV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } summary: "Deprecated. Use TensorArrayReadV3" } op { name: "TensorArrayReadV3" input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "value" description: "The tensor that is read from the TensorArray." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the elem that is returned." } summary: "Read an element from the TensorArray into output `value`." is_stateful: true } op { name: "TensorArrayScatter" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } deprecation { version: 19 explanation: "Use TensorArrayScatterV3" } } op { name: "TensorArrayScatterV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } summary: "Deprecated. Use TensorArrayScatterV3" } op { name: "TensorArrayScatterV3" input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "indices" description: "The locations at which to write the tensor elements." type: DT_INT32 } input_arg { name: "value" description: "The concatenated tensor to write to the TensorArray." type_attr: "T" } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "flow_out" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } attr { name: "T" type: "type" } summary: "Scatter the data from the input value into specific TensorArray elements." description: "`indices` must be a vector, its length must match the first dim of `value`." 
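A minimal sketch of these ops through the TF 1.x Python `tf.TensorArray` wrapper, which threads the flow_in/flow_out scalars automatically; each mutating call returns a new object carrying the updated flow, so the result must be reassigned:

```python
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=3)      # TensorArrayV3
ta = ta.write(0, [1.0, 2.0])                       # TensorArrayWriteV3
ta = ta.scatter([1, 2], [[3.0, 4.0], [5.0, 6.0]])  # TensorArrayScatterV3

value = ta.read(1)    # [3.0, 4.0]                 # TensorArrayReadV3
stacked = ta.stack()  # shape (3, 2): all elements along a new axis 0
```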
is_stateful: true } op { name: "TensorArraySize" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } deprecation { version: 16 explanation: "Use TensorArraySizeV3" } } op { name: "TensorArraySizeV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } summary: "Deprecated. Use TensorArraySizeV3" } op { name: "TensorArraySizeV3" input_arg { name: "handle" description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)." type: DT_RESOURCE } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "size" description: "The current size of the TensorArray." type: DT_INT32 } summary: "Get the current size of the TensorArray." is_stateful: true } op { name: "TensorArraySplit" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } deprecation { version: 16 explanation: "Use TensorArraySplitV3" } } op { name: "TensorArraySplitV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } summary: "Deprecated. Use TensorArraySplitV3" } op { name: "TensorArraySplitV3" input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "value" description: "The concatenated tensor to write to the TensorArray." type_attr: "T" } input_arg { name: "lengths" description: "The vector of lengths, how to split the rows of value into the\nTensorArray." type: DT_INT64 } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "flow_out" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } attr { name: "T" type: "type" } summary: "Split the data from the input value into TensorArray elements." description: "Assuming that `lengths` takes on values\n\n ```(n0, n1, ..., n(T-1))```\n\nand that `value` has shape\n\n ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,\n\nthis splits values into a TensorArray with T tensors.\n\nTensorArray index t will be the subtensor of values with starting position\n\n ```(n0 + n1 + ... 
+ n(t-1), 0, 0, ...)```\n\nand having size\n\n ```nt x d0 x d1 x ...```" is_stateful: true } op { name: "TensorArrayUnpack" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } deprecation { version: 20 explanation: "Use TensorArrayScatterV3 with RangeOp" } } op { name: "TensorArrayV2" input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_STRING } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } summary: "Deprecated. Use TensorArrayV3" is_stateful: true } op { name: "TensorArrayV3" input_arg { name: "size" description: "The size of the array." type: DT_INT32 } output_arg { name: "handle" description: "The handle to the TensorArray." type: DT_RESOURCE } output_arg { name: "flow" description: "A scalar used to control gradient flow." type: DT_FLOAT } attr { name: "dtype" type: "type" description: "The type of the elements in the tensor_array." } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error." } attr { name: "dynamic_size" type: "bool" default_value { b: false } description: "A boolean that determines whether writes to the TensorArray\nare allowed to grow the size. By default, this is not allowed." } attr { name: "clear_after_read" type: "bool" default_value { b: true } description: "If true (default), Tensors in the TensorArray are cleared\nafter being read. This disables multiple read semantics but allows early\nrelease of memory." } attr { name: "tensor_array_name" type: "string" default_value { s: "" } description: "Overrides the name used for the temporary tensor_array\nresource. Default value is the name of the \'TensorArray\' op (which\nis guaranteed unique)." } summary: "An array of Tensors of given size." description: "Write data via Write and read via Read or Pack." is_stateful: true } op { name: "TensorArrayWrite" input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } deprecation { version: 16 explanation: "Use TensorArrayWriteV3" } } op { name: "TensorArrayWriteV2" input_arg { name: "handle" type: DT_STRING } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } summary: "Deprecated. Use TensorArrayWriteV3" } op { name: "TensorArrayWriteV3" input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "index" description: "The position to write to inside the TensorArray." type: DT_INT32 } input_arg { name: "value" description: "The tensor to write to the TensorArray." 
type_attr: "T" } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "flow_out" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } attr { name: "T" type: "type" } summary: "Push an element onto the tensor_array." is_stateful: true } op { name: "TensorDataset" input_arg { name: "components" type_list_attr: "Toutput_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that emits `components` as a tuple of tensors once." is_stateful: true } op { name: "TensorSliceDataset" input_arg { name: "components" type_list_attr: "Toutput_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } summary: "Creates a dataset that emits each dim-0 slice of `components` once." is_stateful: true } op { name: "TensorSummary" input_arg { name: "tensor" description: "A tensor to serialize." type_attr: "T" } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" } attr { name: "description" type: "string" default_value { s: "" } description: "A json-encoded SummaryDescription proto." } attr { name: "labels" type: "list(string)" default_value { list { } } description: "An unused list of strings." } attr { name: "display_name" type: "string" default_value { s: "" } description: "An unused string." } summary: "Outputs a `Summary` protocol buffer with a tensor." description: "This op is being phased out in favor of TensorSummaryV2, which lets callers pass\na tag as well as a serialized SummaryMetadata proto string that contains\nplugin-specific data. We will keep this op to maintain backwards compatibility." } op { name: "TensorSummaryV2" input_arg { name: "tag" description: "A string attached to this summary. Used for organization in TensorBoard." type: DT_STRING } input_arg { name: "tensor" description: "A tensor to serialize." type_attr: "T" } input_arg { name: "serialized_summary_metadata" description: "A serialized SummaryMetadata proto. Contains plugin\ndata." type: DT_STRING } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" } summary: "Outputs a `Summary` protocol buffer with a tensor and per-plugin data." } op { name: "TextLineDataset" input_arg { name: "filenames" description: "A scalar or a vector containing the name(s) of the file(s) to be\nread." type: DT_STRING } input_arg { name: "compression_type" description: "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\"." type: DT_STRING } input_arg { name: "buffer_size" description: "A scalar containing the number of bytes to buffer." type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } summary: "Creates a dataset that emits the lines of one or more text files." is_stateful: true } op { name: "TextLineReader" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "skip_header_lines" type: "int" default_value { i: 0 } description: "Number of lines to skip from the beginning of every file." 
} attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } summary: "A Reader that outputs the lines of a file delimited by \'\\n\'." is_stateful: true } op { name: "TextLineReaderV2" output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "skip_header_lines" type: "int" default_value { i: 0 } description: "Number of lines to skip from the beginning of every file." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } summary: "A Reader that outputs the lines of a file delimited by \'\\n\'." is_stateful: true } op { name: "ThreadUnsafeUnigramCandidateSampler" input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } summary: "Generates labels for candidate sampling with a learned unigram distribution." description: "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. 
The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels." is_stateful: true } op { name: "Tile" input_arg { name: "input" description: "1-D or higher." type_attr: "T" } input_arg { name: "multiples" description: "1-D. Length must be the same as the number of dimensions in `input`" type_attr: "Tmultiples" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tmultiples" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Constructs a tensor by tiling a given tensor." description: "This operation creates a new tensor by replicating `input` `multiples` times.\nThe output tensor\'s i\'th dimension has `input.dims(i) * multiples[i]` elements,\nand the values of `input` are replicated `multiples[i]` times along the \'i\'th\ndimension. For example, tiling `[a b c d]` by `[2]` produces\n`[a b c d a b c d]`." } op { name: "TileGrad" input_arg { name: "input" type_attr: "T" } input_arg { name: "multiples" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } summary: "Returns the gradient of `Tile`." description: "Since `Tile` takes an input and repeats the input `multiples` times\nalong each dimension, `TileGrad` takes in `multiples` and aggregates\neach repeated tile of `input` into `output`." deprecation { version: 3 explanation: "TileGrad has been replaced with reduce_sum" } } op { name: "TopK" input_arg { name: "input" description: "1-D or higher with last dimension at least `k`." type_attr: "T" } output_arg { name: "values" description: "The `k` largest elements along each last dimensional slice." type_attr: "T" } output_arg { name: "indices" description: "The indices of `values` within the last dimension of `input`." type: DT_INT32 } attr { name: "k" type: "int" description: "Number of top elements to look for along the last dimension (along each\nrow for matrices)." has_minimum: true } attr { name: "sorted" type: "bool" default_value { b: true } description: "If true the resulting `k` elements will be sorted by the values in\ndescending order." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Finds values and indices of the `k` largest elements for the last dimension." description: "If the input is a vector (rank-1), finds the `k` largest entries in the vector\nand outputs their values and indices as vectors. Thus `values[j]` is the\n`j`-th largest entry in `input`, and its index is `indices[j]`.\n\nFor matrices (resp. higher rank input), computes the top `k` entries in each\nrow (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\nIf two elements are equal, the lower-index element appears first.\n\nIf `k` varies dynamically, use `TopKV2` below." deprecation { version: 7 explanation: "Use TopKV2 instead" } } op { name: "TopKV2" input_arg { name: "input" description: "1-D or higher with last dimension at least `k`." type_attr: "T" } input_arg { name: "k" description: "0-D. Number of top elements to look for along the last dimension (along each\nrow for matrices)." type: DT_INT32 } output_arg { name: "values" description: "The `k` largest elements along each last dimensional slice." 
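A short example of the TopK semantics via the Python wrapper `tf.nn.top_k` (which takes `k` dynamically, i.e. the TopKV2 form):

```python
import tensorflow as tf

x = tf.constant([[1.0, 4.0, 2.0, 3.0],
                 [5.0, 0.0, 5.0, 1.0]])

values, indices = tf.nn.top_k(x, k=2, sorted=True)
# values  ==> [[4.0, 3.0],
#              [5.0, 5.0]]
# indices ==> [[1, 3],
#              [0, 2]]   # for ties, the lower index appears first
```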
type_attr: "T" } output_arg { name: "indices" description: "The indices of `values` within the last dimension of `input`." type: DT_INT32 } attr { name: "sorted" type: "bool" default_value { b: true } description: "If true the resulting `k` elements will be sorted by the values in\ndescending order." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } summary: "Finds values and indices of the `k` largest elements for the last dimension." description: "If the input is a vector (rank-1), finds the `k` largest entries in the vector\nand outputs their values and indices as vectors. Thus `values[j]` is the\n`j`-th largest entry in `input`, and its index is `indices[j]`.\n\nFor matrices (resp. higher rank input), computes the top `k` entries in each\nrow (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\nIf two elements are equal, the lower-index element appears first." } op { name: "Transpose" input_arg { name: "x" type_attr: "T" } input_arg { name: "perm" type_attr: "Tperm" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tperm" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Shuffle dimensions of x according to a permutation." description: "The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`" } op { name: "TruncateDiv" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } summary: "Returns x / y element-wise for integer types." description: "Truncation designates that negative numbers will round fractional quantities\ntoward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different\nthan Python semantics. See `FloorDiv` for a division function that matches\nPython Semantics.\n\n*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "TruncateMod" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } summary: "Returns element-wise remainder of division. This emulates C semantics in that" description: "the result here is consistent with a truncating divide. E.g. `truncate(x / y) *\ny + truncate_mod(x, y) = x`.\n\n*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" } op { name: "TruncatedNormal" input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } output_arg { name: "output" description: "A tensor of the specified shape filled with random truncated normal\nvalues." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. 
Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Outputs random values from a truncated normal distribution." description: "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked." is_stateful: true } op { name: "UniformCandidateSampler" input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } summary: "Generates labels for candidate sampling with a uniform distribution." description: "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels." is_stateful: true } op { name: "Unique" input_arg { name: "x" description: "1-D." type_attr: "T" } output_arg { name: "y" description: "1-D." type_attr: "T" } output_arg { name: "idx" description: "1-D." type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Finds unique elements in a 1-D tensor." 
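The C-versus-Python rounding distinction above, made concrete with the corresponding TF 1.x wrappers (values follow the `-7 / 5` example):

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])

tf.truncatediv(x, y)  # [-1, 1]: rounds toward zero (C semantics)
tf.floordiv(x, y)     # [-2, 1]: rounds toward -infinity (Python semantics)

# Each divide pairs with the matching remainder so that
# div(x, y) * y + mod(x, y) == x:
tf.truncatemod(x, y)  # [-2, 2]
tf.floormod(x, y)     # [3, 2]
```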
description: "This operation returns a tensor `y` containing all of the unique elements of `x`\nsorted in the same order that they occur in `x`. This operation also returns a\ntensor `idx` the same size as `x` that contains the index of each value of `x`\nin the unique output `y`. In other words:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor \'x\' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx = unique(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n```" } op { name: "UniqueWithCounts" input_arg { name: "x" description: "1-D." type_attr: "T" } output_arg { name: "y" description: "1-D." type_attr: "T" } output_arg { name: "idx" description: "1-D." type_attr: "out_idx" } output_arg { name: "count" description: "1-D." type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } summary: "Finds unique elements in a 1-D tensor." description: "This operation returns a tensor `y` containing all of the unique elements of `x`\nsorted in the same order that they occur in `x`. This operation also returns a\ntensor `idx` the same size as `x` that contains the index of each value of `x`\nin the unique output `y`. Finally, it returns a third tensor `count` that\ncontains the count of each element of `y` in `x`. In other words:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor \'x\' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx, count = unique_with_counts(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\ncount ==> [2, 1, 3, 1, 2]\n```" } op { name: "Unpack" input_arg { name: "value" description: "1-D or higher, with `axis` dimension size equal to `num`." type_attr: "T" } output_arg { name: "output" description: "The list of tensors unpacked from `value`." type_attr: "T" number_attr: "num" } attr { name: "num" type: "int" has_minimum: true } attr { name: "T" type: "type" } attr { name: "axis" type: "int" default_value { i: 0 } description: "Dimension along which to unpack. Negative values wrap around, so the\nvalid range is `[-R, R)`." } summary: "Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors." description: "Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.\nFor example, given a tensor of shape `(A, B, C, D)`;\n\nIf `axis == 0` then the i\'th tensor in `output` is the slice `value[i, :, :, :]`\n and each tensor in `output` will have shape `(B, C, D)`. (Note that the\n dimension unpacked along is gone, unlike `split`).\n\nIf `axis == 1` then the i\'th tensor in `output` is the slice `value[:, i, :, :]`\n and each tensor in `output` will have shape `(A, C, D)`.\nEtc.\n\nThis is the opposite of `pack`." } op { name: "UnsortedSegmentMax" input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension." type_attr: "Tindices" } input_arg { name: "num_segments" type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `num_segments`." 
op {
  name: "UnsortedSegmentMax"
  input_arg {
    name: "data"
    type_attr: "T"
  }
  input_arg {
    name: "segment_ids"
    description: "A 1-D tensor whose size is equal to the size of `data`\'s\nfirst dimension."
    type_attr: "Tindices"
  }
  input_arg {
    name: "num_segments"
    type: DT_INT32
  }
  output_arg {
    name: "output"
    description: "Has same shape as data, except for dimension 0 which\nhas size `num_segments`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT32
        type: DT_INT64
        type: DT_UINT8
        type: DT_INT16
        type: DT_INT8
        type: DT_UINT16
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "Tindices"
    type: "type"
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Computes the Max along segments of a tensor."
  description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nThis operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\nInstead of computing the sum over segments, it computes the maximum\nsuch that:\n\n\\\\(output_i = \\max_j data_j\\\\) where max is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the maximum is empty for a given segment ID `i`, it outputs the smallest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::min()`."
}
op {
  name: "UnsortedSegmentSum"
  input_arg {
    name: "data"
    type_attr: "T"
  }
  input_arg {
    name: "segment_ids"
    description: "A tensor whose shape is a prefix of `data.shape`."
    type_attr: "Tindices"
  }
  input_arg {
    name: "num_segments"
    type: DT_INT32
  }
  output_arg {
    name: "output"
    description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
      }
    }
  }
  attr {
    name: "Tindices"
    type: "type"
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Computes the sum along segments of a tensor."
  description: "Read @{$math_ops#segmentation$the section on segmentation} for an explanation of\nsegments.\n\nComputes a tensor such that\n`output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such\nthat `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\nrange of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n`num_segments` should equal the number of distinct segment IDs."
}
op {
  name: "Unstage"
  output_arg {
    name: "values"
    type_list_attr: "dtypes"
  }
  attr {
    name: "capacity"
    type: "int"
    default_value {
      i: 0
    }
    has_minimum: true
  }
  attr {
    name: "memory_limit"
    type: "int"
    default_value {
      i: 0
    }
    has_minimum: true
  }
  attr {
    name: "dtypes"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
  }
  summary: "Op is similar to a lightweight Dequeue."
  description: "The basic functionality is similar to dequeue with many fewer\ncapabilities and options. This Op is optimized for performance."
  is_stateful: true
}
op {
  name: "VarHandleOp"
  output_arg {
    name: "resource"
    type: DT_RESOURCE
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "the container this variable is placed in."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "the name by which this variable is referred to."
  }
  attr {
    name: "dtype"
    type: "type"
    description: "the type of this variable. Must agree with the dtypes\nof all ops using this variable."
  }
  attr {
    name: "shape"
    type: "shape"
    description: "The (possibly partially specified) shape of this variable."
  }
  summary: "Creates a handle to a Variable resource."
  is_stateful: true
}
op {
  name: "VarIsInitializedOp"
  input_arg {
    name: "resource"
    description: "the input resource handle."
    type: DT_RESOURCE
  }
  output_arg {
    name: "is_initialized"
    description: "a scalar boolean which is true if the variable has been\ninitialized."
    type: DT_BOOL
  }
  summary: "Checks whether a resource handle-based variable has been initialized."
  is_stateful: true
}
op {
  name: "Variable"
  output_arg {
    name: "ref"
    type_attr: "dtype"
    is_ref: true
  }
  attr {
    name: "shape"
    type: "shape"
  }
  attr {
    name: "dtype"
    type: "type"
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
  }
  summary: "Use VariableV2 instead."
  is_stateful: true
}
op {
  name: "VariableShape"
  input_arg {
    name: "input"
    type: DT_RESOURCE
  }
  output_arg {
    name: "output"
    type_attr: "out_type"
  }
  attr {
    name: "out_type"
    type: "type"
    default_value {
      type: DT_INT32
    }
    allowed_values {
      list {
        type: DT_INT32
        type: DT_INT64
      }
    }
  }
  summary: "Returns the shape of the variable pointed to by `resource`."
  description: "This operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```\n# \'t\' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\nshape(t) ==> [2, 2, 3]\n```"
  is_stateful: true
}
op {
  name: "VariableV2"
  output_arg {
    name: "ref"
    description: "A reference to the variable tensor."
    type_attr: "dtype"
    is_ref: true
  }
  attr {
    name: "shape"
    type: "shape"
    description: "The shape of the variable tensor."
  }
  attr {
    name: "dtype"
    type: "type"
    description: "The type of elements in the variable tensor."
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
  }
  summary: "Holds state in the form of a tensor that persists across steps."
  description: "Outputs a ref to the tensor state so it may be read or modified.\nTODO(zhifengc/mrry): Add a pointer to a more detailed document\nabout sharing states in tensorflow."
  is_stateful: true
}
op {
  name: "Where"
  input_arg {
    name: "input"
    type_attr: "T"
  }
  output_arg {
    name: "index"
    type: DT_INT64
  }
  attr {
    name: "T"
    type: "type"
    default_value {
      type: DT_BOOL
    }
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
        type: DT_INT64
        type: DT_INT32
        type: DT_UINT8
        type: DT_UINT16
        type: DT_INT16
        type: DT_INT8
        type: DT_COMPLEX64
        type: DT_COMPLEX128
        type: DT_QINT8
        type: DT_QUINT8
        type: DT_QINT32
        type: DT_HALF
        type: DT_UINT32
        type: DT_UINT64
        type: DT_BOOL
      }
    }
  }
  summary: "Returns locations of nonzero / true values in a tensor."
  description: "This operation returns the coordinates of true elements in `input`. The\ncoordinates are returned in a 2-D tensor where the first dimension (rows)\nrepresents the number of true elements, and the second dimension (columns)\nrepresents the coordinates of the true elements. Keep in mind, the shape of\nthe output tensor can vary depending on how many true values there are in\n`input`. Indices are output in row-major order.\n\nFor example:\n\n```\n# \'input\' tensor is [[True, False]\n#                    [True, False]]\n# \'input\' has two true values, so output has two coordinates.\n# \'input\' has rank of 2, so coordinates have two indices.\nwhere(input) ==> [[0, 0],\n                  [1, 0]]\n\n# `input` tensor is [[[True, False]\n#                     [True, False]]\n#                    [[False, True]\n#                     [False, True]]\n#                    [[False, False]\n#                     [False, True]]]\n# \'input\' has 5 true values, so output has 5 coordinates.\n# \'input\' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n                  [0, 1, 0],\n                  [1, 0, 1],\n                  [1, 1, 1],\n                  [2, 1, 1]]\n\n# `input` tensor is [[[1.5, 0.0]\n#                     [-0.5, 0.0]]\n#                    [[0.0, 0.25]\n#                     [0.0, 0.75]]\n#                    [[0.0, 0.0]\n#                     [0.0, 0.01]]]\n# \'input\' has 5 nonzero values, so output has 5 coordinates.\n# \'input\' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n                  [0, 1, 0],\n                  [1, 0, 1],\n                  [1, 1, 1],\n                  [2, 1, 1]]\n\n# `input` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]\n#                     [0.0 + 0.5j, 0.0 + 0.0j]]\n#                    [[0.0 + 0.0j, 0.25 + 1.5j]\n#                     [0.0 + 0.0j, 0.75 + 0.0j]]\n#                    [[0.0 + 0.0j, 0.0 + 0.0j]\n#                     [0.0 + 0.0j, 0.01 + 0.0j]]]\n# \'input\' has 5 nonzero magnitude values, so output has 5 coordinates.\n# \'input\' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n                  [0, 1, 0],\n                  [1, 0, 1],\n                  [1, 1, 1],\n                  [2, 1, 1]]\n```"
}
op {
  name: "WholeFileReader"
  output_arg {
    name: "reader_handle"
    description: "The handle to reference the Reader."
    type: DT_STRING
    is_ref: true
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
  }
  summary: "A Reader that outputs the entire contents of a file as a value."
  description: "To use, enqueue filenames in a Queue. The output of ReaderRead will\nbe a filename (key) and the contents of that file (value)."
  is_stateful: true
}
op {
  name: "WholeFileReaderV2"
  output_arg {
    name: "reader_handle"
    description: "The handle to reference the Reader."
    type: DT_RESOURCE
  }
  attr {
    name: "container"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used."
  }
  attr {
    name: "shared_name"
    type: "string"
    default_value {
      s: ""
    }
    description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead."
  }
  summary: "A Reader that outputs the entire contents of a file as a value."
  description: "To use, enqueue filenames in a Queue. The output of ReaderRead will\nbe a filename (key) and the contents of that file (value)."
  is_stateful: true
}
op {
  name: "WriteFile"
  input_arg {
    name: "filename"
    description: "scalar. The name of the file to which we write the contents."
    type: DT_STRING
  }
  input_arg {
    name: "contents"
    description: "scalar. The content to be written to the output file."
    type: DT_STRING
  }
  summary: "Writes contents to the file at input filename."
  description: "Creates the file and recursively creates the directory if it does not exist."
}
op {
  name: "ZerosLike"
  input_arg {
    name: "x"
    description: "a tensor of type T."
    type_attr: "T"
  }
  output_arg {
    name: "y"
    description: "a tensor of the same shape and type as x but filled with zeros."
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
  }
  summary: "Returns a tensor of zeros with the same shape and type as x."
}
op {
  name: "Zeta"
  input_arg {
    name: "x"
    type_attr: "T"
  }
  input_arg {
    name: "q"
    type_attr: "T"
  }
  output_arg {
    name: "z"
    type_attr: "T"
  }
  attr {
    name: "T"
    type: "type"
    allowed_values {
      list {
        type: DT_FLOAT
        type: DT_DOUBLE
      }
    }
  }
  summary: "Compute the Hurwitz zeta function \\\\(\\zeta(x, q)\\\\)."
  description: "The Hurwitz zeta function is defined as:\n\n\\\\(\\zeta(x, q) = \\sum_{n=0}^{\\infty} (q + n)^{-x}\\\\)"
}
op {
  name: "ZipDataset"
  input_arg {
    name: "input_datasets"
    type: DT_VARIANT
    number_attr: "N"
  }
  output_arg {
    name: "handle"
    type: DT_VARIANT
  }
  attr {
    name: "output_types"
    type: "list(type)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "output_shapes"
    type: "list(shape)"
    has_minimum: true
    minimum: 1
  }
  attr {
    name: "N"
    type: "int"
    has_minimum: true
    minimum: 1
  }
  summary: "Creates a dataset that zips together `input_datasets`."
}
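# Illustrative only, not part of the generated op definition: ZipDataset backs
# the Python tf.data API's Dataset.zip (a TF 1.x sketch).
#
#   import tensorflow as tf
#   a = tf.data.Dataset.range(3)            # 0, 1, 2
#   b = tf.data.Dataset.range(3, 6)         # 3, 4, 5
#   zipped = tf.data.Dataset.zip((a, b))    # (0, 3), (1, 4), (2, 5)
#   nxt = zipped.make_one_shot_iterator().get_next()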