object STen extends Serializable
Companion object of lamp.STen
- STen.fromDoubleArray, STen.fromLongArray, STen.fromFloatArray factory methods copy data from JVM arrays into off-heap memory and create an STen instance
- There are similar factories which take SADDLE data structures
- Alphabetic
- By Inheritance
- STen
- Serializable
- AnyRef
- Any
- Hide All
- Show All
- Public
- Protected
Type Members
- implicit class OwnedSyntax extends AnyRef
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def addOut(out: STen, self: STen, other: STen, alpha: Double): Unit
- def addcdivOut(out: STen, self: STen, tensor1: STen, tensor2: STen, alpha: Double): Unit
- def addcmulOut(out: STen, self: STen, tensor1: STen, tensor2: STen, alpha: Double): Unit
- def addmmOut(out: STen, self: STen, mat1: STen, mat2: STen, beta: Double, alpha: Double): Unit
- def arange[S](start: Double, end: Double, step: Double, tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def arange_l[S](start: Long, end: Long, step: Long, tensorOptions: STenOptions = STen.lOptions)(implicit arg0: Sc[S]): STen
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def atan2[S](y: STen, x: STen)(implicit arg0: Sc[S]): STen
- val bOptions: STenOptions
A tensor option specifying CPU and byte
- val bf16Options: STenOptions
- def bmmOut(out: STen, self: STen, other: STen): Unit
- def cartesianProduct[S](list: List[STen])(implicit arg0: Sc[S]): STen
- def cat[S](tensors: Seq[STen], dim: Long)(implicit arg0: Sc[S]): STen
- def catOut(out: STen, tensors: Seq[STen], dim: Int): Unit
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @IntrinsicCandidate() @native()
- val dOptions: STenOptions
A tensor option specifying CPU and double
- def divOut(out: STen, self: STen, other: STen): Unit
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def eye[S](n: Int, m: Int, tensorOptions: STenOptions)(implicit arg0: Sc[S]): STen
- def eye[S](n: Int, tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- val fOptions: STenOptions
A tensor option specifying CPU and float
- def free(value: Tensor): STen
Wraps a tensor without registering it to any scope.
Wraps a tensor without registering it to any scope.
Memory may leak.
- def fromByteArray[S](ar: Array[Byte], dim: Seq[Long], device: Device)(implicit arg0: Sc[S]): STen
Returns a tensor with the given content and shape on the given device
- def fromDoubleArray[S](ar: Array[Double], dim: Seq[Long], device: Device, precision: FloatingPointPrecision)(implicit arg0: Sc[S]): STen
Returns a tensor with the given content and shape on the given device
- def fromFile[S](path: String, offset: Long, length: Long, scalarTypeByte: Byte, pin: Boolean)(implicit arg0: Sc[S]): STen
Create tensor directly from file.
Create tensor directly from file. Memory maps a file into host memory. Data is not passed through the JVM. Returned tensor is always on the CPU device.
- path
file path
- offset
byte offset into the file. Must be page aligned (usually multiple of 4096)
- length
byte length of the data
- scalarTypeByte
scalar type (byte=1,short=2,int=3,long=4,half=5,float=6,double=7)
- pin
if true the mapped segment will be page locked with mlock(2)
- returns
tensor on CPU
- def fromFloatArray[S](ar: Array[Float], dim: Seq[Long], device: Device)(implicit arg0: Sc[S]): STen
Returns a tensor with the given content and shape on the given device
- def fromIntArray[S](ar: Array[Int], dim: Seq[Long], device: Device)(implicit arg0: Sc[S]): STen
Returns a tensor with the given content and shape on the given device
- def fromLongArray[S](ar: Array[Long])(implicit arg0: Sc[S]): STen
Returns a 1D tensor with the given content
- def fromLongArray[S](ar: Array[Long], dim: Seq[Long], device: Device)(implicit arg0: Sc[S]): STen
Returns a tensor with the given content and shape on the given device
- def fromLongArrayOfArrays[S](ar: Array[Array[Long]], dim: Seq[Long], device: Device)(implicit arg0: Sc[S]): STen
- def fromShortArray[S](ar: Array[Short], dim: Seq[Long], device: Device)(implicit arg0: Sc[S]): STen
Returns a tensor with the given content and shape on the given device
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- val iOptions: STenOptions
A tensor option specifying CPU and int
- def indexCopyOut(out: STen, self: STen, dim: Int, index: STen, source: STen): Unit
- def indexSelectOut(out: STen, self: STen, dim: Int, index: STen): Unit
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val lOptions: STenOptions
A tensor option specifying CPU and long
- def linspace[S](start: Double, end: Double, steps: Long, tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def lstsq[S](A: STen, B: STen)(implicit arg0: Sc[S]): (STen, STen, STen, STen)
- def meanOut(out: STen, self: STen, dim: Seq[Int], keepDim: Boolean): Unit
- def mmOut(out: STen, self: STen, other: STen): Unit
- def mse_loss[S](self: STen, target: STen, reduction: Long)(implicit arg0: Sc[S]): STen
- def mse_loss_backward[S](gradOutput: STen, self: STen, target: STen, reduction: Long)(implicit arg0: Sc[S]): STen
- def mulOut(out: STen, self: STen, other: STen): Unit
- def multinomial[S](probs: STen, numSamples: Int, replacement: Boolean)(implicit arg0: Sc[S]): STen
- def ncclBoadcast(tensors: Seq[(STen, NcclComm)]): Unit
Broadcast tensor on root to the clique. Blocks until all peers execute the broadcast.
Broadcast tensor on root to the clique. Blocks until all peers execute the broadcast. Takes a list of tensors for the case where a single thread manages multiple GPUs.
- def ncclInitComm(nRanks: Int, myRank: Int, myDevice: Int, ncclUniqueId: NcclUniqueId): NcclComm
Blocks until all peers join the clique.
- def ncclReduce(inputs: Seq[(STen, NcclComm)], output: STen, rootRank: Int): Unit
Reduction with +. Output must be on the root rank.
Reduction with +. Output must be on the root rank.
Blocks until all peers execute the reduce. Takes a list of tensors for the case where a single thread manages multiple GPUs.
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def normal[S](mean: Double, std: Double, size: Seq[Long], options: STenOptions)(implicit arg0: Sc[S]): STen
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- def ones[S](size: Seq[Long], tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def onesLike[S](tensor: STen)(implicit arg0: Sc[S]): STen
- def onesLike[S](tensor: Tensor)(implicit arg0: Sc[S]): STen
- def owned(value: Tensor)(implicit scope: Scope): STen
Wraps an aten.Tensor and registers it to the given scope
- def powOut(out: STen, self: STen, other: STen): Unit
- def powOut(out: STen, self: STen, other: Double): Unit
- def rand[S](size: Seq[Long], tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def randint[S](low: Long, high: Long, size: Seq[Long], tensorOptions: STenOptions)(implicit arg0: Sc[S]): STen
- def randint[S](high: Long, size: Seq[Long], tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def randn[S](size: Seq[Long], tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def randperm[S](n: Long, tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def remainderOut(out: STen, self: STen, other: Double): Unit
- def remainderOut(out: STen, self: STen, other: STen): Unit
- def scalarDouble[S](value: Double, options: STenOptions)(implicit arg0: Sc[S]): STen
- def scalarLong(value: Long, options: STenOptions)(implicit scope: Scope): STen
- def scaledDotProductAttention[S](query: STen, key: STen, value: STen, isCausal: Boolean)(implicit arg0: Sc[S]): (STen, STen)
- def scaledDotProductAttentionBackward[S](gradOutput: STen, query: STen, key: STen, value: STen, out: STen, logsumexp: STen, isCausal: Boolean)(implicit arg0: Sc[S]): (STen, STen, STen)
- val shOptions: STenOptions
A tensor option specifying CPU and short
- def smooth_l1_loss_backward[S](gradOutput: STen, self: STen, target: STen, reduction: Long, beta: Double)(implicit arg0: Sc[S]): STen
- def softplus_backward[S](gradOutput: STen, self: STen, beta: Double, threshold: Double)(implicit arg0: Sc[S]): STen
- def sparse_coo[S](indices: STen, values: STen, dim: Seq[Long], tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def stack[S](tensors: Seq[STen], dim: Long)(implicit arg0: Sc[S]): STen
- def subOut(out: STen, self: STen, other: STen, alpha: Double): Unit
- def sumOut(out: STen, self: STen, dim: Seq[Int], keepDim: Boolean): Unit
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def tanh_backward[S](gradOutput: STen, output: STen)(implicit arg0: Sc[S]): STen
- def tensorsFromFile[S](path: String, offset: Long, length: Long, pin: Boolean, tensors: List[(Byte, Long, Long)])(implicit arg0: Sc[S]): Vector[STen]
Create tensors directly from file.
Create tensors directly from file. Memory maps a file into host memory. Data is not passed through the JVM. Returned tensor is always on the CPU device.
- path
file path
- offset
byte offset into the file. Must be page aligned (usually multiple of 4096)
- length
byte length of the data (all tensors in total)
- pin
if true the mapped segment will be page locked with mlock(2)
- tensors
list of tensors with (scalarType, byte offset, byte length), byte offset must be aligned to 8
- returns
tensors on CPU
- def toString(): String
- Definition Classes
- AnyRef → Any
- def to_dense_backward[S](gradOutput: STen, input: STen)(implicit arg0: Sc[S]): STen
- def triangularSolve[S](b: STen, A: STen, upper: Boolean, transpose: Boolean, uniTriangular: Boolean)(implicit arg0: Sc[S]): STen
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- def where[S](condition: Tensor, self: STen, other: STen)(implicit arg0: Sc[S]): STen
- def where[S](condition: STen, self: STen, other: STen)(implicit arg0: Sc[S]): STen
- def zeros[S](size: Seq[Long], tensorOptions: STenOptions = STen.dOptions)(implicit arg0: Sc[S]): STen
- def zerosLike[S](tensor: STen)(implicit arg0: Sc[S]): STen
- def zerosLike[S](tensor: Tensor)(implicit arg0: Sc[S]): STen
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated
- Deprecated
(Since version 9)