
org.bytedeco.pytorch.Adagrad

// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
import org.bytedeco.javacpp.chrono.*;
import static org.bytedeco.javacpp.global.chrono.*;

import static org.bytedeco.pytorch.global.torch.*;


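/**
 *  Java binding for the C++ {@code torch::optim::Adagrad} optimizer, generated by JavaCPP
 *  from the libtorch headers (descriptive comment added; not part of the generated file).
 */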
@Namespace("torch::optim") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class Adagrad extends Optimizer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Adagrad(Pointer p) { super(p); }

  public Adagrad(
        @ByVal OptimizerParamGroupVector param_groups,
        @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(param_groups, defaults); }
  private native void allocate(
        @ByVal OptimizerParamGroupVector param_groups,
        @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
  public Adagrad(
        @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
  private native void allocate(
        @ByVal OptimizerParamGroupVector param_groups);

  public Adagrad(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); }
  private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
  public Adagrad(@ByVal TensorVector params) { super((Pointer)null); allocate(params); }
  private native void allocate(@ByVal TensorVector params);

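  // step() applies one Adagrad update to all parameter groups; the overload taking a
  // LossClosure first re-evaluates the model through the closure and returns the resulting loss tensor.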
  public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
  public native @ByVal Tensor step();
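  // save()/load() serialize and restore the optimizer state through torch serialization archives.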
  public native void save(@ByRef OutputArchive archive);
  public native void load(@ByRef InputArchive archive);
}
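
For reference, a minimal training-loop sketch using this binding is shown below. It assumes the JavaCPP pytorch presets' LinearImpl module, the AdagradOptions(double lr) constructor mirroring torch::optim::AdagradOptions, and the randn(long...) and mse_loss(Tensor, Tensor) overloads from org.bytedeco.pytorch.global.torch; shapes and hyperparameters are illustrative, not prescriptive.

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class AdagradExample {
    public static void main(String[] args) {
        // Small linear model whose parameters() vector feeds the optimizer.
        LinearImpl model = new LinearImpl(4, 2);

        // Adagrad over the model parameters with a 0.01 learning rate
        // (AdagradOptions(double) assumed to mirror torch::optim::AdagradOptions).
        Adagrad optimizer = new Adagrad(model.parameters(), new AdagradOptions(0.01));

        // Illustrative data; the randn(long...) overload from global.torch is assumed.
        Tensor input = randn(8, 4);
        Tensor target = randn(8, 2);

        for (int epoch = 0; epoch < 10; epoch++) {
            optimizer.zero_grad();                  // clear accumulated gradients
            Tensor output = model.forward(input);   // forward pass
            Tensor loss = mse_loss(output, target); // mean-squared-error loss
            loss.backward();                        // back-propagate
            optimizer.step();                       // apply the Adagrad update
            System.out.println("epoch " + epoch + " loss " + loss.item().toDouble());
        }
    }
}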
