.. _program_listing_file_src_training_graph_group_sync.h:

Program Listing for File graph_group_sync.h
===========================================

|exhale_lsh| :ref:`Return to documentation for file <file_src_training_graph_group_sync.h>` (``src/training/graph_group_sync.h``)

.. |exhale_lsh| unicode:: U+021B0 .. UPWARDS ARROW WITH TIP LEFTWARDS

.. code-block:: cpp

   #pragma once

   #include "optimizers/quantizer.h"
   #include "training/graph_group.h"

   namespace marian {

   class SyncGraphGroup : public GraphGroup {
     using Base = GraphGroup;

     const double delay_{1.}; // optimizer-delay parameter. Fractional means to use a fraction of whatever the MB size is

     // @TODO: instead, create an array of ExponentialSmoothing objects, and don't use ExponentialSmoothing as a base class
     std::vector<Ptr<TensorAllocator>> paramsAllocs_; // [deviceIndex] we must hold a reference to the memory until this class dies

     // model quantizer
     std::vector<Ptr<ModelQuantizer>> quantizers_;

     // state for update()
     bool first_{ true }; // gets interpreted and cleared by update()
     std::vector<Ptr<data::Batch>> pendingBatches_; // in case of dynamic MB-size scaling, we temporarly buffer up batches across update() calls until enough
     double updateMultiplier_{1}; // multiplier not applied in collectStats() (no multiplier if not mini-batch-fit)

     void initialize(const Ptr<data::Batch>& exampleBatch);
     bool tryGetSubBatches(Ptr<data::Batch> newBatch, std::vector<Ptr<data::Batch>>& subBatches, size_t& numReadBatches);
     void update(std::vector<Ptr<data::Batch>> subBatches, size_t numReadBatches);

   public:
     SyncGraphGroup(Ptr<Options> config, Ptr<IMPIWrapper> mpi);

     void setScheduler(Ptr<Scheduler> scheduler) override;

     void update(Ptr<data::Batch> batch) override;

     Ptr<data::BatchStats> collectStats(const std::vector<Ptr<Vocab>>& vocabs) override;

     void finalize() override;

     // @TODO: consider to make this a virtual as well? Currently it is a template dispatch
   };
   }  // namespace marian