lamp.data

object IOLoops

Contains training loops and helpers around them.

The two training loops implemented here are:

  1. epochs: a plain epoch-based loop with optional validation, checkpointing, and learning rate scheduling
  2. withSWA: the same loop followed by a stochastic weight averaging (SWA) phase

Linear Supertypes
AnyRef, Any

Type Members

  1. case class TrainingLoopContext(epoch: Int, lastValidationLoss: Option[Double], minValidationLoss: Option[Double]) extends Product with Serializable
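
A TrainingLoopContext is passed to the batch-stream factories of both training loops on every epoch. Below is a minimal sketch of a factory that uses it; the import paths assume lamp's package layout, and makeTrainingStream is a hypothetical stand-in for user code that builds the per-epoch stream:

  import lamp.STen
  import lamp.autograd.Variable
  import lamp.data.{BatchStream, IOLoops}

  // Hypothetical: user code constructing a fresh BatchStream for one epoch.
  def makeTrainingStream(): BatchStream[(Variable, STen), Int, Unit] = ???

  def trainBatches(
      ctx: IOLoops.TrainingLoopContext
  ): BatchStream[(Variable, STen), Int, Unit] = {
    // The context carries the epoch counter and the validation losses seen so
    // far, which a factory may use for logging or curriculum-style schedules.
    ctx.minValidationLoss.foreach { best =>
      println(s"epoch ${ctx.epoch}, best validation loss so far: $best")
    }
    makeTrainingStream()
  }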

Value Members

  1. def epochs[I, M <: GenericModule[I, Variable], LRState, BatchStreamState, BatchStreamBuffers](
         model: SupervisedModel[I, M],
         optimizerFactory: (Seq[(STen, PTag)]) => Optimizer,
         trainBatchesOverEpoch: (TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers],
         validationBatchesOverEpoch: Option[(TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers]],
         epochs: Int,
         trainingCallback: Option[TrainingCallback[M]] = None,
         validationCallback: Option[ValidationCallback[M]] = None,
         checkpointState: Option[(SimpleLoopState, LRState) => IO[Unit]] = None,
         validationFrequency: Int = 1,
         logger: Option[Logger] = None,
         returnMinValidationLossModel: Seq[Int] = Nil,
         learningRateSchedule: LearningRateSchedule[LRState] = LearningRateSchedule.noop,
         prefetch: Boolean = false,
         overlapModelWithLoad: Boolean = false,
         dataParallelModels: Seq[SupervisedModel[I, M]] = Nil,
         initState: Option[SimpleLoopState] = None,
         accumulateGradientOverNBatches: Int = 1,
         learningRateScheduleInitState: Option[LRState] = None,
         printOptimizerAllocations: Boolean = false,
         validationLossExponentialSmoothingFactor: Double = 1.0
     )(implicit arg0: Load[M]): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], LRState, SimpleLoopState)]
     The simple training loop. See the usage sketch after this list.
  2. def forwardAndDiscardBatchStream[I, M <: GenericModule[I, Variable], S, C](
         batchStream: BatchStream[(I, STen), S, C],
         buffers: (Device) => Resource[IO, C],
         model: M with GenericModule[I, Variable]
     ): IO[Unit]
  3. def oneEpoch[I, M <: GenericModule[I, Variable], S, C](
         epochCount: Long,
         trainingCallback: Option[TrainingCallback[M]],
         model: ModelWithOptimizer[I, M],
         trainBatches: BatchStream[(I, STen), S, C],
         logger: Option[Logger],
         learningRateScheduleFactor: Double,
         prefetch: Boolean,
         overlapModelWithLoad: Boolean,
         accumulateGradientOverNBatches: Int
     ): IO[Double]
  4. def parallelRunBatchStream[I, O, M <: GenericModule[I, O], S, O2, C](
         batchStream: BatchStream[(I, STen), S, C],
         bufferPerModel: Resource[IO, List[(Device, C)]],
         models: Seq[M with GenericModule[I, O]]
     )(tx: ((I, STen), O) => O2)(implicit arg0: Movable[O2], scope: Scope): IO[Vector[O2]]
  5. def runBatchStream[A, B, M <: GenericModule[A, B], S, C](
         batchStream: BatchStream[A, S, C],
         buffers: Resource[IO, C],
         model: M with GenericModule[A, B]
     )(implicit arg0: Movable[B], scope: Scope): IO[Vector[B]]
     See the usage sketch after this list.
  6. def validationOneEpoch[I, M <: GenericModule[I, Variable], S, C](
         model: SupervisedModel[I, M],
         validationBatches: BatchStream[(I, STen), S, C],
         validationCallback: Option[ValidationCallback[M]],
         logger: Option[Logger],
         epochCount: Long
     ): IO[Double]
  7. def withSWA[I, M <: GenericModule[I, Variable], LRState, LRStateSWA, BatchStreamState, BatchStreamBuffers](
         model: SupervisedModel[I, M],
         optimizerFactory: (Seq[(STen, PTag)]) => Optimizer,
         trainBatchesOverEpoch: (TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers],
         warmupEpochs: Int,
         swaEpochs: Int,
         validationBatchesOverEpoch: Option[(TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers]] = None,
         trainingCallback: Option[TrainingCallback[M]] = None,
         validationCallback: Option[ValidationCallback[M]] = None,
         checkpointState: Option[(SimpleThenSWALoopState, Either[LRState, LRStateSWA]) => IO[Unit]] = None,
         logger: Option[Logger] = None,
         returnMinValidationLossModel: Seq[Int] = Nil,
         learningRateSchedule: LearningRateSchedule[LRState] = LearningRateSchedule.decrement(20, 0.5),
         swaLearningRateSchedule: SWALearningRateSchedule[LRStateSWA] = SWA.SWALearningRateSchedule.cyclic(minFactor = 0.01, maxFactor = 1d, cycleLength = 10),
         prefetch: Boolean = false,
         dataParallelModels: Seq[SupervisedModel[I, M]] = Nil,
         initState: Option[SimpleThenSWALoopState] = None,
         accumulateGradientOverNBatches: Int = 1,
         learningRateScheduleInitState: Option[LRState] = None,
         swaLearningRateScheduleInitState: Option[LRStateSWA] = None,
         swaForwardPassAfterTraining: Boolean = true,
         validationLossExponentialSmoothingFactor: Double = 1.0
     )(implicit arg0: Load[M]): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], SupervisedModel[I, M])]
     The training loop with stochastic weight averaging. See the usage sketch after this list.
  8. object TrainingLoopContext extends Serializable
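
Usage Sketches

A minimal sketch of wiring up epochs, the simple loop. All arguments are hypothetical user-supplied values, the import paths assume lamp's package layout, and parameters with defaults are omitted; see the full signature above.

  import lamp.STen
  import lamp.autograd.Variable
  import lamp.data.{BatchStream, IOLoops}
  import lamp.nn.{GenericModule, Load, Optimizer, PTag, SupervisedModel}

  def train[M <: GenericModule[Variable, Variable]](
      model: SupervisedModel[Variable, M],
      optimizerFactory: Seq[(STen, PTag)] => Optimizer,
      trainBatches: IOLoops.TrainingLoopContext => BatchStream[(Variable, STen), Int, Unit],
      validationBatches: IOLoops.TrainingLoopContext => BatchStream[(Variable, STen), Int, Unit]
  )(implicit load: Load[M]) =
    IOLoops.epochs(
      model = model,
      optimizerFactory = optimizerFactory,
      trainBatchesOverEpoch = trainBatches,
      validationBatchesOverEpoch = Some(validationBatches),
      epochs = 10,
      // assumption from the parameter name: revert to the model state with the
      // lowest validation loss, considering these epoch indices
      returnMinValidationLossModel = List(5, 9)
    )
  // Result: IO[(Int, SupervisedModel[Variable, M],
  //             List[(Int, Double, Option[(Double, Double)])], LRState, SimpleLoopState)]
  // with LRState inferred from the default (noop) learning rate schedule.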
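
withSWA first trains warmupEpochs epochs under the regular schedule, then swaEpochs epochs under the SWA schedule; judging from its return type, it yields both the final model and a second, weight-averaged one. A sketch with the same hypothetical inputs as above:

  import lamp.STen
  import lamp.autograd.Variable
  import lamp.data.{BatchStream, IOLoops}
  import lamp.nn.{GenericModule, Load, Optimizer, PTag, SupervisedModel}

  def trainWithSwa[M <: GenericModule[Variable, Variable]](
      model: SupervisedModel[Variable, M],
      optimizerFactory: Seq[(STen, PTag)] => Optimizer,
      trainBatches: IOLoops.TrainingLoopContext => BatchStream[(Variable, STen), Int, Unit]
  )(implicit load: Load[M]) =
    IOLoops.withSWA(
      model = model,
      optimizerFactory = optimizerFactory,
      trainBatchesOverEpoch = trainBatches,
      warmupEpochs = 20,
      swaEpochs = 10
    )
  // Result: IO[(Int, SupervisedModel[Variable, M],
  //             List[(Int, Double, Option[(Double, Double)])], SupervisedModel[Variable, M])]
  // The trailing SupervisedModel is, by the SWA construction, presumably the
  // weight-averaged model.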
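
runBatchStream folds a model over a batch stream and collects the outputs, which is useful for inference. A sketch, assuming Unit works as a trivial buffer type and deferring the required Movable and Scope instances to the caller:

  import cats.effect.{IO, Resource}
  import lamp.{Movable, Scope}
  import lamp.autograd.Variable
  import lamp.data.{BatchStream, IOLoops}
  import lamp.nn.GenericModule

  def predict[M <: GenericModule[Variable, Variable]](
      model: M,
      batches: BatchStream[Variable, Int, Unit]
  )(implicit scope: Scope, movable: Movable[Variable]): IO[Vector[Variable]] =
    IOLoops.runBatchStream(
      batchStream = batches,
      buffers = Resource.pure[IO, Unit](()), // trivial buffers for this sketch
      model = model
    )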
