object IOLoops
Contains training loops and helpers around them
The two training loops implemented here are:
- lamp.data.IOLoops.epochs
- lamp.data.IOLoops.withSWA implements Stochastic Weight Averaging
Linear Supertypes
Ordering
- Alphabetic
- By Inheritance
Inherited
- IOLoops
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- Protected
Type Members
- case class TrainingLoopContext(epoch: Int, lastValidationLoss: Option[Double], minValidationLoss: Option[Double]) extends Product with Serializable
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @IntrinsicCandidate() @native()
- def epochs[I, M <: GenericModule[I, Variable], LRState, BatchStreamState, BatchStreamBuffers](model: SupervisedModel[I, M], optimizerFactory: (Seq[(STen, PTag)]) => Optimizer, trainBatchesOverEpoch: (TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers], validationBatchesOverEpoch: Option[(TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers]], epochs: Int, trainingCallback: Option[TrainingCallback[M]] = None, validationCallback: Option[ValidationCallback[M]] = None, checkpointState: Option[(SimpleLoopState, LRState) => IO[Unit]] = None, validationFrequency: Int = 1, logger: Option[Logger] = None, returnMinValidationLossModel: Seq[Int] = Nil, learningRateSchedule: LearningRateSchedule[LRState] = LearningRateSchedule.noop, prefetch: Boolean = false, overlapModelWithLoad: Boolean = false, dataParallelModels: Seq[SupervisedModel[I, M]] = Nil, initState: Option[SimpleLoopState] = None, accumulateGradientOverNBatches: Int = 1, learningRateScheduleInitState: Option[LRState] = None, printOptimizerAllocations: Boolean = false, validationLossExponentialSmoothingFactor: Double = 1.0)(implicit arg0: Load[M]): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], LRState, SimpleLoopState)]
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def forwardAndDiscardBatchStream[I, M <: GenericModule[I, Variable], S, C](batchStream: BatchStream[(I, STen), S, C], buffers: (Device) => Resource[IO, C], model: M with GenericModule[I, Variable]): IO[Unit]
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- def oneEpoch[I, M <: GenericModule[I, Variable], S, C](epochCount: Long, trainingCallback: Option[TrainingCallback[M]], model: ModelWithOptimizer[I, M], trainBatches: BatchStream[(I, STen), S, C], logger: Option[Logger], learningRateScheduleFactor: Double, prefetch: Boolean, overlapModelWithLoad: Boolean, accumulateGradientOverNBatches: Int): IO[Double]
- def parallelRunBatchStream[I, O, M <: GenericModule[I, O], S, O2, C](batchStream: BatchStream[(I, STen), S, C], bufferPerModel: Resource[IO, List[(Device, C)]], models: Seq[M with GenericModule[I, O]])(tx: ((I, STen), O) => O2)(implicit arg0: Movable[O2], scope: Scope): IO[Vector[O2]]
- def runBatchStream[A, B, M <: GenericModule[A, B], S, C](batchStream: BatchStream[A, S, C], buffers: Resource[IO, C], model: M with GenericModule[A, B])(implicit arg0: Movable[B], scope: Scope): IO[Vector[B]]
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def toString(): String
- Definition Classes
- AnyRef → Any
- def validationOneEpoch[I, M <: GenericModule[I, Variable], S, C](model: SupervisedModel[I, M], validationBatches: BatchStream[(I, STen), S, C], validationCallback: Option[ValidationCallback[M]], logger: Option[Logger], epochCount: Long): IO[Double]
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- def withSWA[I, M <: GenericModule[I, Variable], LRState, LRStateSWA, BatchStreamState, BatchStreamBuffers](model: SupervisedModel[I, M], optimizerFactory: (Seq[(STen, PTag)]) => Optimizer, trainBatchesOverEpoch: (TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers], warmupEpochs: Int, swaEpochs: Int, validationBatchesOverEpoch: Option[(TrainingLoopContext) => BatchStream[(I, STen), BatchStreamState, BatchStreamBuffers]] = None, trainingCallback: Option[TrainingCallback[M]] = None, validationCallback: Option[ValidationCallback[M]] = None, checkpointState: Option[(SimpleThenSWALoopState, Either[LRState, LRStateSWA]) => IO[Unit]] = None, logger: Option[Logger] = None, returnMinValidationLossModel: Seq[Int] = Nil, learningRateSchedule: LearningRateSchedule[LRState] = LearningRateSchedule.decrement(20, 0.5), swaLearningRateSchedule: SWALearningRateSchedule[LRStateSWA] = SWA.SWALearningRateSchedule.cyclic( minFactor = 0.01, maxFactor = 1d, cycleLength = 10 ), prefetch: Boolean = false, dataParallelModels: Seq[SupervisedModel[I, M]] = Nil, initState: Option[SimpleThenSWALoopState] = None, accumulateGradientOverNBatches: Int = 1, learningRateScheduleInitState: Option[LRState] = None, swaLearningRateScheduleInitState: Option[LRStateSWA] = None, swaForwardPassAfterTraining: Boolean = true, validationLossExponentialSmoothingFactor: Double = 1.0)(implicit arg0: Load[M]): IO[(Int, SupervisedModel[I, M], List[(Int, Double, Option[(Double, Double)])], SupervisedModel[I, M])]
- object TrainingLoopContext extends Serializable
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated
- Deprecated
(Since version 9)