sealed trait Variable extends AnyRef
A value of a tensor-valued function; a vertex in the computational graph.
A Variable may be constant, i.e. depend on no other Variables. Constant Variables may or may not need their partial derivatives computed.
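For orientation, a minimal end-to-end sketch (assuming the `lamp.Scope.root`, `STen.ones`, `STenOptions.d` and `lamp.autograd.{const, param}` entry points; exact names and signatures may differ between versions):

```scala
import lamp._
import lamp.autograd.{const, param}

Scope.root { implicit scope =>
  // const: a constant leaf with needsGrad=false
  val x = const(STen.ones(List(2L, 2L), STenOptions.d))
  // param: a constant leaf with needsGrad=true (gradient placeholder allocated)
  val w = param(STen.ones(List(2L, 2L), STenOptions.d))
  val loss = (x * w).sum // scalar vertex of the graph
  loss.backprop()
  // gradient of loss with respect to w, accumulated into the placeholder
  w.partialDerivative.foreach(g => println(g.toDoubleArray.toVector))
  ()
}
```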
Abstract Value Members
- abstract def op: Option[Op]
The parent operation of this value in the computational graph. Empty for constants.
- abstract def partialDerivative: Option[STen]
The partial derivative, or a placeholder tensor for the partial derivative.
Returns empty iff this Variable needs no gradient computation. Otherwise a placeholder tensor is allocated upfront when the Variable is allocated.
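As a sketch (assuming the `const` and `param` helpers as in the first example): leaves created without gradient carry no placeholder, trainable leaves do:

```scala
import lamp._
import lamp.autograd.{const, param}

Scope.root { implicit scope =>
  val frozen = const(STen.ones(List(2L), STenOptions.d))
  println(frozen.partialDerivative.isDefined) // false: no gradient computed
  val leaf = param(STen.ones(List(2L), STenOptions.d))
  println(leaf.partialDerivative.isDefined) // true: placeholder preallocated
  ()
}
```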
- abstract def value: STen
The actual tensor value of this Variable.
Concrete Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- def *[S](other: Double)(implicit arg0: Sc[S]): Variable
- def *[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def +[S](other: Double)(implicit arg0: Sc[S]): Variable
- def +[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def -[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def /[S](other: Variable)(implicit arg0: Sc[S]): Variable
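Each of these operators allocates a new vertex in the graph rather than mutating its operands; a sketch (same assumed helpers as above):

```scala
import lamp._
import lamp.autograd.const

Scope.root { implicit scope =>
  val a = const(STen.ones(List(3L), STenOptions.d))
  val b = const(STen.ones(List(3L), STenOptions.d))
  val c = a * 2d + b / (a + 1d) // a new Variable per operation; a and b untouched
  println(c.shape)
  ()
}
```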
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def argmax[S](dim: Long, keepDim: Boolean)(implicit arg0: Sc[S]): Variable
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def assign[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def atan[S](implicit arg0: Sc[S]): Variable
- def backprop(): Unit
Runs the backpropagation algorithm starting from this value.
Only meaningful if this Variable is a scalar, i.e. the number of elements in the value tensor is 1.
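Reduce to a scalar first, e.g. with sum or a loss; a sketch (assumed helpers as above):

```scala
import lamp._
import lamp.autograd.param

Scope.root { implicit scope =>
  val w = param(STen.ones(List(4L), STenOptions.d))
  val loss = (w * w).sum // 1 element, so backprop is meaningful
  loss.backprop()        // accumulates into w.partialDerivative
  ()
}
```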
- def binaryCrossEntropyWithLogitsLoss[S](target: STen, posWeights: Option[STen] = None, reduction: Reduction = Mean)(implicit arg0: Sc[S]): Variable
- def bmm[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def cast[S](precision: FloatingPointPrecision)(implicit arg0: Sc[S]): Variable
- def cat[S](other: Variable, dim: Long)(implicit arg0: Sc[S]): Variable
- def choleskyLower[S](implicit arg0: Sc[S]): Variable
- def choleskySolve[S](factor: Variable, upper: Boolean = false)(implicit arg0: Sc[S]): Variable
- def clamp[S](min: Variable, max: Variable)(implicit arg0: Sc[S]): Variable
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @IntrinsicCandidate() @native()
- def colSum[S](implicit arg0: Sc[S]): Variable
- def cos[S](implicit arg0: Sc[S]): Variable
- def cross[S](other: Variable, dim: Int)(implicit arg0: Sc[S]): Variable
- def crossEntropy[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def debug[S](fun: (STen, Boolean, Boolean) => Unit)(implicit arg0: Sc[S]): Variable
- def detached: Constant
Returns another Variable wrapping the same value tensor, without any parent and with needsGrad=false.
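Useful to stop gradient flow into a subgraph; a sketch (assumed helpers as above):

```scala
import lamp._
import lamp.autograd.{const, param}

Scope.root { implicit scope =>
  val x = const(STen.ones(List(3L), STenOptions.d))
  val w = param(STen.ones(List(3L), STenOptions.d))
  val h = (w * x).relu
  val stopped = h.detached   // same value tensor; no parent
  println(stopped.needsGrad) // false: backprop will not proceed past it
  ()
}
```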
- def diag[S](diagonal: Long)(implicit arg0: Sc[S]): Variable
- def dropout[S](prob: Double, train: Boolean)(implicit arg0: Sc[S]): Variable
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def euclideanDistance[S](b: Variable, dim: Int)(implicit arg0: Sc[S]): Variable
- def exp[S](implicit arg0: Sc[S]): Variable
- def expand[S](shape: List[Long])(implicit arg0: Sc[S]): Variable
- def expandAs[S](other: STen)(implicit arg0: Sc[S]): Variable
- def flatten[S](startDim: Int, endDim: Int)(implicit arg0: Sc[S]): Variable
- def flatten[S](startDim: Int)(implicit arg0: Sc[S]): Variable
- def flatten[S](implicit arg0: Sc[S]): Variable
- def flattenLastDimensions[S](dims: Int)(implicit arg0: Sc[S]): Variable
- def gelu[S](implicit arg0: Sc[S]): Variable
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- def graphMemoryAllocationReport: GraphMemoryAllocationReport
- def hardSwish[S](implicit arg0: Sc[S]): Variable
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- val id: AnyRef
A unique, stable reference identifying this Variable.
- def indexAdd[S](index: Variable, dim: Int, maxIndex: Long)(implicit arg0: Sc[S]): Variable
- def indexAddFromSource[S](index: Variable, dim: Int, source: Variable)(implicit arg0: Sc[S]): Variable
- def indexFill[S](index: Variable, dim: Int, fillValue: Double)(implicit arg0: Sc[S]): Variable
- def indexSelect[S](dim: Long, index: Variable)(implicit arg0: Sc[S]): Variable
- def inv[S](implicit arg0: Sc[S]): Variable
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def leakyRelu[S](negativeSlope: Double)(implicit arg0: Sc[S]): Variable
- def log[S](implicit arg0: Sc[S]): Variable
- def log1p[S](implicit arg0: Sc[S]): Variable
- def logSoftMax[S](dim: Int)(implicit arg0: Sc[S]): Variable
- def logdet[S](implicit arg0: Sc[S]): Variable
- def makeBooleanMask[S](q: Long)(implicit arg0: Sc[S]): Variable
- def maskFill[S](mask: Variable, fill: Double)(implicit arg0: Sc[S]): Variable
- def maskSelect[S](mask: Variable)(implicit arg0: Sc[S]): Variable
- def maximum[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def mean[S](dim: List[Int], keepDim: Boolean)(implicit arg0: Sc[S]): Variable
- def mean[S](dim: List[Int])(implicit arg0: Sc[S]): Variable
- def minimum[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def mm[S](other: Variable)(implicit arg0: Sc[S]): Variable
- def mseLoss[S](target: STen, reduction: Reduction = Mean)(implicit arg0: Sc[S]): Variable
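Together, mm and a loss give a one-layer regression sketch (assumed helpers as above, and torch-style broadcasting for +):

```scala
import lamp._
import lamp.autograd.{const, param}

Scope.root { implicit scope =>
  val x = const(STen.ones(List(8L, 4L), STenOptions.d)) // batch of 8 inputs
  val w = param(STen.ones(List(4L, 1L), STenOptions.d))
  val b = param(STen.ones(List(1L, 1L), STenOptions.d))
  val target = STen.ones(List(8L, 1L), STenOptions.d)
  val pred = x.mm(w) + b          // (8,1); + broadcasts b over the batch
  val loss = pred.mseLoss(target) // Mean reduction by default: scalar
  loss.backprop()
  ()
}
```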
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def needsGrad: Boolean
Returns true if lamp.autograd.Variable.partialDerivative is defined.
- def nllLoss[S](target: STen, weights: STen, reduction: Reduction = Mean, ignore: Long = -100L)(implicit arg0: Sc[S]): Variable
- def norm2[S](dim: List[Int], keepDim: Boolean)(implicit arg0: Sc[S]): Variable
- def norm2[S](dim: List[Int])(implicit arg0: Sc[S]): Variable
- def normalize[S](dim: List[Int], eps: Double)(implicit arg0: Sc[S]): Variable
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- def oneHot[S](numClasses: Int)(implicit arg0: Sc[S]): Variable
- def options[S](implicit arg0: Sc[S]): STenOptions
Returns the tensor options of its value.
- def pinv[S](rcond: Double = 1e-5)(implicit arg0: Sc[S]): Variable
- def pow[S](exponent: Variable)(implicit arg0: Sc[S]): Variable
- def pow[S](const: Double)(implicit arg0: Sc[S]): Variable
- def relu[S](implicit arg0: Sc[S]): Variable
- def repeatInterleave[S](repeats: Variable, dim: Int)(implicit arg0: Sc[S]): Variable
- def reshape[S](shape: List[Long])(implicit arg0: Sc[S]): Variable
- def rowSum[S](implicit arg0: Sc[S]): Variable
- def scatterAdd[S](index: Variable, dim: Int, maxIndex: Long)(implicit arg0: Sc[S]): Variable
- def select[S](dim: Long, index: Long)(implicit arg0: Sc[S]): Variable
- def shape: List[Long]
Returns the shape of its value.
- def sigmoid[S](implicit arg0: Sc[S]): Variable
- def sin[S](implicit arg0: Sc[S]): Variable
- val sizes: List[Long]
Returns the shape of its value.
- def slice[S](dim: Long, start: Long, end: Long, step: Long)(implicit arg0: Sc[S]): Variable
- def smoothL1Loss[S](target: STen, reduction: Reduction = Mean, beta: Double = 1.0)(implicit arg0: Sc[S]): Variable
- def softplus[S](beta: Double, threshold: Double)(implicit arg0: Sc[S]): Variable
- def squaredFrobenius[S](implicit arg0: Sc[S]): Variable
- def sum[S](dim: List[Int], keepDim: Boolean)(implicit arg0: Sc[S]): Variable
- def sum[S](implicit arg0: Sc[S]): Variable
- def swish1[S](implicit arg0: Sc[S]): Variable
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def t[S](implicit arg0: Sc[S]): Variable
Returns a new variable with the first two dimensions transposed.
- def tan[S](implicit arg0: Sc[S]): Variable
- def tanh[S](implicit arg0: Sc[S]): Variable
- def toDense[S](implicit arg0: Sc[S]): Variable
- def toDoubleArray: Array[Double]
- def toLongArray: Array[Long]
- def toString(): String
- Definition Classes
- Variable → AnyRef → Any
- def transpose[S](dim1: Int, dim2: Int)(implicit arg0: Sc[S]): Variable
Returns a new variable with the respective dimensions transposed.
- def variance[S](dim: List[Int])(implicit arg0: Sc[S]): Variable
- def view[S](shape: List[Long])(implicit arg0: Sc[S]): Variable
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- lazy val wengert: Seq[Variable]
Returns the Wengert list: the dependencies of this Variable in the computational graph, in topological order.
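A sketch, continuing the graphs built above:

```scala
// e.g. with loss = (x * w).sum from the earlier sketches
val order: Seq[lamp.autograd.Variable] = loss.wengert
println(order.size) // vertices of the graph, in topological order
```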
- def withGrad[S](implicit arg0: Sc[S]): ConstantWithGrad
Returns another Variable wrapping the same value tensor, without any parent and with needsGrad=true.
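The dual of detached; a sketch (assumed helpers as above):

```scala
import lamp._
import lamp.autograd.const

Scope.root { implicit scope =>
  val frozen = const(STen.ones(List(2L), STenOptions.d))
  val leaf = frozen.withGrad // same value tensor, now with a gradient placeholder
  println(leaf.needsGrad)    // true
  ()
}
```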
- def zeroGrad(): Unit
Zeros out the partial derivative in place.
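Typically called before each backprop, since gradients accumulate into the placeholder; a manual loop sketch (parameter update elided, assumed helpers as above):

```scala
import lamp._
import lamp.autograd.param

Scope.root { implicit scope =>
  val w = param(STen.ones(List(2L), STenOptions.d))
  for (_ <- 1 to 3) {
    w.zeroGrad() // clear gradients from the previous iteration
    val loss = (w * w).sum
    loss.backprop()
    w.partialDerivative.foreach(g => println(g.toDoubleArray.toVector))
    // a real optimizer would update w.value from the gradient here
  }
  ()
}
```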
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated
- Deprecated
(Since version 9)