"Fossies" - the Fresh Open Source Software Archive

Member "ssr-0.4.2/src/AV/Output/Synchronizer.cpp" (18 May 2020, 39772 Bytes) of package /linux/privat/ssr-0.4.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can view or download the uninterpreted source code file here. For more information about "Synchronizer.cpp" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 0.4.1_vs_0.4.2.

    1 /*
    2 Copyright (c) 2012-2020 Maarten Baert <maarten-baert@hotmail.com>
    3 
    4 This file is part of SimpleScreenRecorder.
    5 
    6 SimpleScreenRecorder is free software: you can redistribute it and/or modify
    7 it under the terms of the GNU General Public License as published by
    8 the Free Software Foundation, either version 3 of the License, or
    9 (at your option) any later version.
   10 
   11 SimpleScreenRecorder is distributed in the hope that it will be useful,
   12 but WITHOUT ANY WARRANTY; without even the implied warranty of
   13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14 GNU General Public License for more details.
   15 
   16 You should have received a copy of the GNU General Public License
   17 along with SimpleScreenRecorder.  If not, see <http://www.gnu.org/licenses/>.
   18 */
   19 
   20 #include "Synchronizer.h"
   21 
   22 #include "Logger.h"
   23 #include "CommandLineOptions.h"
   24 #include "OutputManager.h"
   25 #include "OutputSettings.h"
   26 #include "VideoEncoder.h"
   27 #include "AudioEncoder.h"
   28 #include "SampleCast.h"
   29 #include "SyncDiagram.h"
   30 
// The amount of filtering applied to audio timestamps to reduce noise. Higher values reduce timestamp noise (and associated drift correction),
// but if the value is too high, it will take more time to detect gaps.
// Used as the divisor of the exponential moving average applied to incoming audio timestamps in ReadAudioSamples.
const int64_t Synchronizer::AUDIO_TIMESTAMP_FILTER = 20;

// These values change how fast the synchronizer does drift correction.
// If this value is too low, the error will not be corrected fast enough. But if the value is too high, the audio
// may get weird speed fluctuations caused by the limited accuracy of the recording timestamps.
// The difference between sample length and time length has a lot of noise and can't be used directly,
// so it is averaged out using exponential smoothing. However, since the difference tends to increase gradually over time,
// exponential smoothing would constantly lag behind, so instead of simple proportional feedback, I use a PI controller.
// For critical damping, choose I = P*P/4.
const double Synchronizer::DRIFT_CORRECTION_P = 0.3;
const double Synchronizer::DRIFT_CORRECTION_I = 0.3 * 0.3 / 4.0; // = P*P/4, critical damping (see note above)

// The maximum audio/video desynchronization allowed, in seconds. If the error is greater than this value, the synchronizer will insert zeros
// rather than relying on normal drift correction. This is something that should be avoided since it will result in noticeable interruptions,
// so it should only be triggered when something is really wrong. If the error is smaller, the synchronizer will do nothing and the
// drift correction system will take care of it (eventually).
const double Synchronizer::DRIFT_ERROR_THRESHOLD = 0.05;

// The maximum block size for drift correction, in seconds. This is needed to avoid numerical problems in the feedback system.
const double Synchronizer::DRIFT_MAX_BLOCK = 0.5;

// The maximum number of video frames and audio samples that will be buffered. This should be enough to cope with the fact that video and
// audio don't arrive at the same time, but not too high because that would cause memory problems if one of the inputs fails.
// The limit for audio can be set very high, because audio uses almost no memory.
const size_t Synchronizer::MAX_VIDEO_FRAMES_BUFFERED = 30;
const size_t Synchronizer::MAX_AUDIO_SAMPLES_BUFFERED = 1000000;

// The maximum delay between video frames, in microseconds. If the delay is longer, duplicates will be inserted.
// This is needed because some video codecs/players can't handle long delays.
const int64_t Synchronizer::MAX_FRAME_DELAY = 200000;
   63 
   64 static std::unique_ptr<AVFrameWrapper> CreateVideoFrame(unsigned int width, unsigned int height, AVPixelFormat pixel_format, const std::shared_ptr<AVFrameData>& reuse_data) {
   65 
   66     // get required planes
   67     unsigned int planes = 0;
   68     size_t linesize[3] = {0}, planesize[3] = {0};
   69     switch(pixel_format) {
   70         case AV_PIX_FMT_YUV444P: {
   71             // Y/U/V = 1 byte per pixel
   72             planes = 3;
   73             linesize[0]  = grow_align16(width); planesize[0] = linesize[0] * height;
   74             linesize[1]  = grow_align16(width); planesize[1] = linesize[1] * height;
   75             linesize[2]  = grow_align16(width); planesize[2] = linesize[2] * height;
   76             break;
   77         }
   78         case AV_PIX_FMT_YUV422P: {
   79             // Y = 1 byte per pixel, U/V = 1 byte per 2x1 pixels
   80             assert(width % 2 == 0);
   81             planes = 3;
   82             linesize[0]  = grow_align16(width    ); planesize[0] = linesize[0] * height;
   83             linesize[1]  = grow_align16(width / 2); planesize[1] = linesize[1] * height;
   84             linesize[2]  = grow_align16(width / 2); planesize[2] = linesize[2] * height;
   85             break;
   86         }
   87         case AV_PIX_FMT_YUV420P: {
   88             // Y = 1 byte per pixel, U/V = 1 byte per 2x2 pixels
   89             assert(width % 2 == 0);
   90             assert(height % 2 == 0);
   91             planes = 3;
   92             linesize[0]  = grow_align16(width    ); planesize[0] = linesize[0] * height    ;
   93             linesize[1]  = grow_align16(width / 2); planesize[1] = linesize[1] * height / 2;
   94             linesize[2]  = grow_align16(width / 2); planesize[2] = linesize[2] * height / 2;
   95             break;
   96         }
   97         case AV_PIX_FMT_NV12: {
   98             assert(width % 2 == 0);
   99             assert(height % 2 == 0);
  100             // planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved
  101             // Y = 1 byte per pixel, U/V = 1 byte per 2x2 pixels
  102             planes = 2;
  103             linesize[0]  = grow_align16(width); planesize[0] = linesize[0] * height    ;
  104             linesize[1]  = grow_align16(width); planesize[1] = linesize[1] * height / 2;
  105             break;
  106         }
  107         case AV_PIX_FMT_BGRA: {
  108             // BGRA = 4 bytes per pixel
  109             planes = 1;
  110             linesize[0] = grow_align16(width * 4); planesize[0] = linesize[0] * height;
  111             break;
  112         }
  113         case AV_PIX_FMT_BGR24:
  114         case AV_PIX_FMT_RGB24: {
  115             // BGR/RGB = 3 bytes per pixel
  116             planes = 1;
  117             linesize[0] = grow_align16(width * 3); planesize[0] = linesize[0] * height;
  118             break;
  119         }
  120         default: assert(false); break;
  121     }
  122 
  123     // create the frame
  124     size_t totalsize = 0;
  125     for(unsigned int p = 0; p < planes; ++p) {
  126         totalsize += planesize[p];
  127     }
  128     std::shared_ptr<AVFrameData> frame_data = (reuse_data == NULL)? std::make_shared<AVFrameData>(totalsize) : reuse_data;
  129     std::unique_ptr<AVFrameWrapper> frame(new AVFrameWrapper(frame_data));
  130     uint8_t *data = frame->GetRawData();
  131     for(unsigned int p = 0; p < planes; ++p) {
  132         frame->GetFrame()->data[p] = data;
  133         frame->GetFrame()->linesize[p] = linesize[p];
  134         data += planesize[p];
  135     }
  136 #if SSR_USE_AVFRAME_WIDTH_HEIGHT
  137     frame->GetFrame()->width = width;
  138     frame->GetFrame()->height = height;
  139 #endif
  140 #if SSR_USE_AVFRAME_FORMAT
  141     frame->GetFrame()->format = pixel_format;
  142 #endif
  143 #if SSR_USE_AVFRAME_SAR
  144     frame->GetFrame()->sample_aspect_ratio.num = 1;
  145     frame->GetFrame()->sample_aspect_ratio.den = 1;
  146 #endif
  147 
  148     return frame;
  149 
  150 }
  151 
  152 static std::unique_ptr<AVFrameWrapper> CreateAudioFrame(unsigned int channels, unsigned int sample_rate, unsigned int samples, unsigned int planes, AVSampleFormat sample_format) {
  153 
  154     // get required sample size
  155     // note: sample_size = sizeof(sampletype) * channels
  156     unsigned int sample_size = 0; // to keep GCC happy
  157     switch(sample_format) {
  158         case AV_SAMPLE_FMT_S16:
  159 #if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
  160         case AV_SAMPLE_FMT_S16P:
  161 #endif
  162             sample_size = channels * sizeof(int16_t); break;
  163         case AV_SAMPLE_FMT_FLT:
  164 #if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
  165         case AV_SAMPLE_FMT_FLTP:
  166 #endif
  167             sample_size = channels * sizeof(float); break;
  168         default: assert(false); break;
  169     }
  170 
  171     // create the frame
  172     size_t plane_size = grow_align16(samples * sample_size / planes);
  173     std::shared_ptr<AVFrameData> frame_data = std::make_shared<AVFrameData>(plane_size * planes);
  174     std::unique_ptr<AVFrameWrapper> frame(new AVFrameWrapper(frame_data));
  175     for(unsigned int p = 0; p < planes; ++p) {
  176         frame->GetFrame()->data[p] = frame->GetRawData() + plane_size * p;
  177         frame->GetFrame()->linesize[p] = samples * sample_size / planes;
  178     }
  179 #if SSR_USE_AVFRAME_NB_SAMPLES
  180     frame->GetFrame()->nb_samples = samples;
  181 #endif
  182 #if SSR_USE_AVFRAME_CHANNELS
  183     frame->GetFrame()->channels = channels;
  184 #endif
  185 #if SSR_USE_AVFRAME_SAMPLE_RATE
  186     frame->GetFrame()->sample_rate = sample_rate;
  187 #endif
  188 #if SSR_USE_AVFRAME_FORMAT
  189     frame->GetFrame()->format = sample_format;
  190 #endif
  191 
  192     return frame;
  193 
  194 }
  195 
Synchronizer::Synchronizer(OutputManager *output_manager) {

	// keep non-owning pointers to the output manager and its settings/format
	m_output_manager = output_manager;
	m_output_settings = m_output_manager->GetOutputSettings();
	m_output_format = m_output_manager->GetOutputFormat();
	// at least one stream must be enabled, otherwise there is nothing to synchronize
	assert(m_output_format->m_video_enabled || m_output_format->m_audio_enabled);

	try {
		Init();
	} catch(...) {
		// Init may throw partway through; release whatever was created before rethrowing
		Free();
		throw;
	}

}
  211 
Synchronizer::~Synchronizer() {

	// disconnect from the video and audio sources
	ConnectVideoSource(NULL);
	ConnectAudioSource(NULL);

	// tell the thread to stop and wait for it to finish
	if(m_thread.joinable()) {
		Logger::LogInfo("[Synchronizer::~Synchronizer] " + Logger::tr("Stopping synchronizer thread ..."));
		m_should_stop = true;
		m_thread.join();
	}

	// flush one more time so any data still sitting in the buffers is passed on
	{
		SharedLock lock(&m_shared_data);
		FlushBuffers(lock.get());
	}

	// free everything
	Free();

}
  235 
// Initializes all per-stream and shared state and starts the synchronizer thread.
// Called from the constructor; may throw, in which case the constructor calls Free().
void Synchronizer::Init() {

	// initialize video
	if(m_output_format->m_video_enabled) {
		// convert MAX_FRAME_DELAY (microseconds) to a frame count, rounded to nearest;
		// zero means frame skipping is not allowed
		m_max_frames_skipped = (m_output_settings->video_allow_frame_skipping)? (MAX_FRAME_DELAY * m_output_format->m_video_frame_rate + 500000) / 1000000 : 0;
		VideoLock videolock(&m_video_data);
		videolock->m_last_timestamp = std::numeric_limits<int64_t>::min();
		videolock->m_next_timestamp = SINK_TIMESTAMP_ASAP;
	}

	// initialize audio
	if(m_output_format->m_audio_enabled) {
		AudioLock audiolock(&m_audio_data);
		audiolock->m_fast_resampler.reset(new FastResampler(m_output_format->m_audio_channels, 0.9f));
		InitAudioSegment(audiolock.get());
		audiolock->m_warn_desync = true;
	}

	// create sync diagram (debug visualization, enabled via command-line option)
	if(CommandLineOptions::GetSyncDiagram()) {
		m_sync_diagram.reset(new SyncDiagram(4));
		m_sync_diagram->SetChannelName(0, SyncDiagram::tr("Video in"));
		m_sync_diagram->SetChannelName(1, SyncDiagram::tr("Audio in"));
		m_sync_diagram->SetChannelName(2, SyncDiagram::tr("Video out"));
		m_sync_diagram->SetChannelName(3, SyncDiagram::tr("Audio out"));
		m_sync_diagram->show();
	}

	// initialize shared data
	{
		SharedLock lock(&m_shared_data);

		if(m_output_format->m_audio_enabled) {
			// scratch buffer for assembling fixed-size audio frames for the encoder
			lock->m_partial_audio_frame.Alloc(m_output_format->m_audio_frame_size * m_output_format->m_audio_channels);
			lock->m_partial_audio_frame_samples = 0;
		}
		lock->m_video_pts = 0;
		lock->m_audio_samples = 0;
		lock->m_time_offset = 0;

		InitSegment(lock.get());

		// warn only once about dropped video frames
		lock->m_warn_drop_video = true;

	}

	// start synchronizer thread (must happen last, after all state is set up)
	m_should_stop = false;
	m_error_occurred = false;
	m_thread = std::thread(&Synchronizer::SynchronizerThread, this);

}
  288 
// Releases resources. Currently empty: all members clean up automatically through
// their destructors. Kept as the single cleanup path used by both the constructor
// (on exception) and the destructor.
void Synchronizer::Free() {

}
  292 
// Starts a new recording segment (public entry point).
// The audio lock is taken and released first; the shared lock is taken afterwards,
// so the two locks are never held at the same time.
void Synchronizer::NewSegment() {

	if(m_output_format->m_audio_enabled) {
		AudioLock audiolock(&m_audio_data);
		InitAudioSegment(audiolock.get());
	}

	SharedLock lock(&m_shared_data);
	NewSegment(lock.get());

}
  304 
// Returns the total recorded time in microseconds (thread-safe wrapper around the locked overload).
int64_t Synchronizer::GetTotalTime() {
	SharedLock lock(&m_shared_data);
	return GetTotalTime(lock.get());
}
  309 
// Returns the timestamp at which the next video frame is wanted
// (SINK_TIMESTAMP_ASAP right after Init, i.e. before the first frame arrived).
int64_t Synchronizer::GetNextVideoTimestamp() {
	assert(m_output_format->m_video_enabled);
	VideoLock videolock(&m_video_data);
	return videolock->m_next_timestamp;
}
  315 
// Accepts one captured video frame: validates its timestamp, converts it to the
// output size/pixel format, and appends it to the shared video buffer.
void Synchronizer::ReadVideoFrame(unsigned int width, unsigned int height, const uint8_t* data, int stride, AVPixelFormat format, int64_t timestamp) {
	assert(m_output_format->m_video_enabled);

	// add new block to sync diagram
	if(m_sync_diagram != NULL)
		m_sync_diagram->AddBlock(0, (double) timestamp * 1.0e-6, (double) timestamp * 1.0e-6 + 1.0 / (double) m_output_format->m_video_frame_rate, QColor(255, 0, 0));

	VideoLock videolock(&m_video_data);

	// check the timestamp: clamp non-monotonic timestamps to the last one seen,
	// and only warn when the jump backwards exceeds 10 ms
	if(timestamp < videolock->m_last_timestamp) {
		if(timestamp < videolock->m_last_timestamp - 10000)
			Logger::LogWarning("[Synchronizer::ReadVideoFrame] " + Logger::tr("Warning: Received video frame with non-monotonic timestamp."));
		timestamp = videolock->m_last_timestamp;
	}

	// drop the frame if it is too early (before converting it, to save the conversion cost)
	if(videolock->m_next_timestamp != SINK_TIMESTAMP_ASAP && timestamp < videolock->m_next_timestamp - (int64_t) (1000000 / m_output_format->m_video_frame_rate))
		return;

	// update the timestamps: the next frame is wanted one frame period later,
	// or at 'timestamp' if the source is running behind
	videolock->m_last_timestamp = timestamp;
	videolock->m_next_timestamp = std::max(videolock->m_next_timestamp + (int64_t) (1000000 / m_output_format->m_video_frame_rate), timestamp);

	// create the converted frame
	std::unique_ptr<AVFrameWrapper> converted_frame = CreateVideoFrame(m_output_format->m_video_width, m_output_format->m_video_height, m_output_format->m_video_pixel_format, NULL);

	// scale and convert the frame to the right format
	videolock->m_fast_scaler.Scale(width, height, format, &data, &stride,
			m_output_format->m_video_width, m_output_format->m_video_height, m_output_format->m_video_pixel_format,
			converted_frame->GetFrame()->data, converted_frame->GetFrame()->linesize);

	SharedLock lock(&m_shared_data);

	// avoid memory problems by limiting the video buffer size
	if(lock->m_video_buffer.size() >= MAX_VIDEO_FRAMES_BUFFERED) {
		if(lock->m_segment_audio_started) {
			// audio is running but not keeping up: drop this (newest) frame and warn once
			if(lock->m_warn_drop_video) {
				lock->m_warn_drop_video = false;
				Logger::LogWarning("[Synchronizer::ReadVideoFrame] " + Logger::tr("Warning: Video buffer overflow, some frames will be lost. The audio input seems to be too slow."));
			}
			return;
		} else {
			// if the audio hasn't started yet, it makes more sense to drop the oldest frames
			lock->m_video_buffer.pop_front();
			assert(lock->m_video_buffer.size() > 0);
			// the segment now starts at the (new) oldest buffered frame
			lock->m_segment_video_start_time = lock->m_video_buffer.front()->GetFrame()->pts;
		}
	}

	// start video
	if(!lock->m_segment_video_started) {
		lock->m_segment_video_started = true;
		lock->m_segment_video_start_time = timestamp;
		lock->m_segment_video_stop_time = timestamp;
	}

	// store the frame
	converted_frame->GetFrame()->pts = timestamp;
	lock->m_video_buffer.push_back(std::move(converted_frame));

	// increase the segment stop time by one frame period past this frame
	lock->m_segment_video_stop_time = timestamp + (int64_t) (1000000 / m_output_format->m_video_frame_rate);

}
  381 
  382 void Synchronizer::ReadVideoPing(int64_t timestamp) {
  383     assert(m_output_format->m_video_enabled);
  384 
  385     SharedLock lock(&m_shared_data);
  386 
  387     // if the video has not been started, ignore it
  388     if(!lock->m_segment_video_started)
  389         return;
  390 
  391     // increase the segment stop time
  392     lock->m_segment_video_stop_time = std::max(lock->m_segment_video_stop_time, timestamp + (int64_t) (1000000 / m_output_format->m_video_frame_rate));
  393 
  394 }
  395 
// Accepts a block of captured audio samples: filters the timestamps, applies drift
// correction (drop/insert samples for large errors, resampling-based PI control for
// small ones), converts to the output channel layout and sample rate, and appends
// the result to the shared audio buffer.
void Synchronizer::ReadAudioSamples(unsigned int channels, unsigned int sample_rate, AVSampleFormat format, unsigned int sample_count, const uint8_t* data, int64_t timestamp) {
	assert(m_output_format->m_audio_enabled);

	// sanity check
	if(sample_count == 0)
		return;

	// add new block to sync diagram
	if(m_sync_diagram != NULL)
		m_sync_diagram->AddBlock(1, (double) timestamp * 1.0e-6, (double) timestamp * 1.0e-6 + (double) sample_count / (double) sample_rate, QColor(0, 255, 0));

	AudioLock audiolock(&m_audio_data);

	// check the timestamp: clamp non-monotonic timestamps, warn only for jumps larger than 10 ms
	if(timestamp < audiolock->m_last_timestamp) {
		if(timestamp < audiolock->m_last_timestamp - 10000)
			Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Received audio samples with non-monotonic timestamp."));
		timestamp = audiolock->m_last_timestamp;
	}

	// update the timestamps (first block initializes the filter state)
	int64_t previous_timestamp;
	if(audiolock->m_first_timestamp == (int64_t) AV_NOPTS_VALUE) {
		audiolock->m_filtered_timestamp = timestamp;
		audiolock->m_first_timestamp = timestamp;
		previous_timestamp = timestamp;
	} else {
		previous_timestamp = audiolock->m_last_timestamp;
	}
	audiolock->m_last_timestamp = timestamp;

	// filter the timestamp (exponential moving average, see AUDIO_TIMESTAMP_FILTER)
	int64_t timestamp_delta = (int64_t) sample_count * (int64_t) 1000000 / (int64_t) sample_rate;
	audiolock->m_filtered_timestamp += (timestamp - audiolock->m_filtered_timestamp) / AUDIO_TIMESTAMP_FILTER;

	// calculate drift (positive = too many samples, negative = too few; see GetAudioDrift)
	double current_drift = GetAudioDrift(audiolock.get());

	// if there are too many audio samples, drop some of them (unlikely unless you use PulseAudio)
	if(current_drift > DRIFT_ERROR_THRESHOLD && !audiolock->m_drop_samples) {
		audiolock->m_drop_samples = true;
		Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Too many audio samples, dropping samples to keep the audio in sync with the video."));
	}

	// if there are not enough audio samples, insert zeros
	if(current_drift < -DRIFT_ERROR_THRESHOLD && !audiolock->m_insert_samples) {
		audiolock->m_insert_samples = true;
		Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Not enough audio samples, inserting silence to keep the audio in sync with the video."));
	}

	// reset filter and recalculate drift if necessary
	// (after a hard correction the smoothed timestamp is no longer meaningful)
	if(audiolock->m_drop_samples || audiolock->m_insert_samples) {
		audiolock->m_filtered_timestamp = timestamp;
		current_drift = GetAudioDrift(audiolock.get());
	}

	// drop samples
	if(audiolock->m_drop_samples) {
		audiolock->m_drop_samples = false;

		// drop samples from the front of this block to compensate for the positive drift
		int n = (int) round(current_drift * (double) sample_rate);
		if(n > 0) {
			if(n >= (int) sample_count) {
				// the whole block must go; keep dropping on the next call too
				audiolock->m_drop_samples = true;
				return; // drop all samples
			}
			if(format == AV_SAMPLE_FMT_FLT) {
				data += n * channels * sizeof(float);
			} else if(format == AV_SAMPLE_FMT_S16) {
				data += n * channels * sizeof(int16_t);
			} else if(format == AV_SAMPLE_FMT_S32) {
				data += n * channels * sizeof(int32_t);
			} else {
				assert(false);
			}
			sample_count -= n;
		}

	}

	// insert zeros
	unsigned int sample_count_out = 0;
	if(audiolock->m_insert_samples) {
		audiolock->m_insert_samples = false;

		// how many samples should be inserted?
		int n = (int) round(-current_drift * (double) sample_rate);
		if(n > 0) {

			// insert zeros (resampled silence prepended to the output of this block)
			audiolock->m_temp_input_buffer.Alloc(n * m_output_format->m_audio_channels);
			std::fill_n(audiolock->m_temp_input_buffer.GetData(), n * m_output_format->m_audio_channels, 0.0f);
			sample_count_out = audiolock->m_fast_resampler->Resample((double) sample_rate / (double) m_output_format->m_audio_sample_rate, 1.0,
																	 audiolock->m_temp_input_buffer.GetData(), n, &audiolock->m_temp_output_buffer, sample_count_out);

			// recalculate drift
			current_drift = GetAudioDrift(audiolock.get(), sample_count_out);

		}

	}

	// increase filtered timestamp by the nominal duration of this block
	audiolock->m_filtered_timestamp += timestamp_delta;

	// do drift correction
	// The point of drift correction is to keep video and audio in sync even when the clocks are not running at exactly the same speed.
	// This can happen because the sample rate of the sound card is not always 100% accurate. Even a 0.1% error will result in audio that is
	// seconds too early or too late at the end of a one hour video. This problem doesn't occur on all computers though (I'm not sure why).
	// Another cause of desynchronization is problems/glitches with PulseAudio (e.g. jumps in time when switching between sources).
	double drift_correction_dt = fmin((double) (timestamp - previous_timestamp) * 1.0e-6, DRIFT_MAX_BLOCK);
	// integral term of the PI controller, clamped to keep the feedback numerically sane
	audiolock->m_average_drift = clamp(audiolock->m_average_drift + DRIFT_CORRECTION_I * current_drift * drift_correction_dt, -0.5, 0.5);
	if(audiolock->m_average_drift < -0.02 && audiolock->m_warn_desync) {
		audiolock->m_warn_desync = false;
		Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Audio input is more than 2% too slow!"));
	}
	if(audiolock->m_average_drift > 0.02 && audiolock->m_warn_desync) {
		audiolock->m_warn_desync = false;
		Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Audio input is more than 2% too fast!"));
	}
	double length = (double) sample_count / (double) sample_rate;
	// P + I output, clamped and scaled down for blocks longer than DRIFT_MAX_BLOCK
	double drift_correction = clamp(DRIFT_CORRECTION_P * current_drift + audiolock->m_average_drift, -0.5, 0.5) * fmin(1.0, DRIFT_MAX_BLOCK / length);

	//qDebug() << "current_drift" << current_drift << "average_drift" << audiolock->m_average_drift << "drift_correction" << drift_correction;

	// convert the samples to float, remapping the channel layout if needed
	const float *data_float = NULL; // to keep GCC happy
	if(format == AV_SAMPLE_FMT_FLT) {
		if(channels == m_output_format->m_audio_channels) {
			// already the right layout, no copy needed
			data_float = (const float*) data;
		} else {
			audiolock->m_temp_input_buffer.Alloc(sample_count * m_output_format->m_audio_channels);
			data_float = audiolock->m_temp_input_buffer.GetData();
			SampleChannelRemap(sample_count, (const float*) data, channels, audiolock->m_temp_input_buffer.GetData(), m_output_format->m_audio_channels);
		}
	} else if(format == AV_SAMPLE_FMT_S16) {
		audiolock->m_temp_input_buffer.Alloc(sample_count * m_output_format->m_audio_channels);
		data_float = audiolock->m_temp_input_buffer.GetData();
		SampleChannelRemap(sample_count, (const int16_t*) data, channels, audiolock->m_temp_input_buffer.GetData(), m_output_format->m_audio_channels);
	} else if(format == AV_SAMPLE_FMT_S32) {
		audiolock->m_temp_input_buffer.Alloc(sample_count * m_output_format->m_audio_channels);
		data_float = audiolock->m_temp_input_buffer.GetData();
		SampleChannelRemap(sample_count, (const int32_t*) data, channels, audiolock->m_temp_input_buffer.GetData(), m_output_format->m_audio_channels);
	} else {
		assert(false);
	}

	// resample; the drift correction slightly stretches/compresses the audio to pull it back in sync
	sample_count_out = audiolock->m_fast_resampler->Resample((double) sample_rate / (double) m_output_format->m_audio_sample_rate, 1.0 / (1.0 - drift_correction),
															 data_float, sample_count, &audiolock->m_temp_output_buffer, sample_count_out);
	audiolock->m_samples_written += sample_count_out;

	SharedLock lock(&m_shared_data);

	// avoid memory problems by limiting the audio buffer size
	if(lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels >= MAX_AUDIO_SAMPLES_BUFFERED) {
		if(lock->m_segment_video_started) {
			Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Audio buffer overflow, starting new segment to keep the audio in sync with the video "
																				"(some video and/or audio may be lost). The video input seems to be too slow."));
			NewSegment(lock.get());
		} else {
			// If the video hasn't started yet, it makes more sense to drop the oldest samples.
			// Shifting the start time like this isn't completely accurate, but this shouldn't happen often anyway.
			// The number of samples dropped is calculated so that the buffer will be 90% full after this.
			size_t n = lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels - MAX_AUDIO_SAMPLES_BUFFERED * 9 / 10;
			lock->m_audio_buffer.Pop(n * m_output_format->m_audio_channels);
			lock->m_segment_audio_start_time += (int64_t) round((double) n / (double) m_output_format->m_audio_sample_rate * 1.0e6);
		}
	}

	// start audio
	if(!lock->m_segment_audio_started) {
		lock->m_segment_audio_started = true;
		lock->m_segment_audio_start_time = timestamp;
		lock->m_segment_audio_stop_time = timestamp;
	}

	// store the samples
	lock->m_audio_buffer.Push(audiolock->m_temp_output_buffer.GetData(), sample_count_out * m_output_format->m_audio_channels);

	// increase segment stop time based on the total number of samples produced so far
	double new_sample_length = (double) (lock->m_segment_audio_samples_read + lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels) / (double) m_output_format->m_audio_sample_rate;
	lock->m_segment_audio_stop_time = lock->m_segment_audio_start_time + (int64_t) round(new_sample_length * 1.0e6);

}
  582 
  583 void Synchronizer::ReadAudioHole() {
  584     assert(m_output_format->m_audio_enabled);
  585 
  586     AudioLock audiolock(&m_audio_data);
  587     if(audiolock->m_first_timestamp != (int64_t) AV_NOPTS_VALUE) {
  588         audiolock->m_average_drift = 0.0;
  589         if(!audiolock->m_drop_samples || !audiolock->m_insert_samples) {
  590             Logger::LogWarning("[Synchronizer::ReadAudioHole] " + Logger::tr("Warning: Received hole in audio stream, inserting silence to keep the audio in sync with the video."));
  591             audiolock->m_drop_samples = true; // because PulseAudio is weird
  592             audiolock->m_insert_samples = true;
  593         }
  594     }
  595 
  596 }
  597 
  598 void Synchronizer::InitAudioSegment(AudioData* audiolock) {
  599     audiolock->m_last_timestamp = std::numeric_limits<int64_t>::min();
  600     audiolock->m_first_timestamp = AV_NOPTS_VALUE;
  601     audiolock->m_samples_written = 0;
  602     audiolock->m_average_drift = 0.0;
  603     audiolock->m_drop_samples = false;
  604     audiolock->m_insert_samples = false;
  605 }
  606 
  607 double Synchronizer::GetAudioDrift(AudioData* audiolock, unsigned int extra_samples) {
  608     double sample_length = ((double) (audiolock->m_samples_written + extra_samples) + audiolock->m_fast_resampler->GetOutputLatency()) / (double) m_output_format->m_audio_sample_rate;
  609     double time_length = (double) (audiolock->m_filtered_timestamp - audiolock->m_first_timestamp) * 1.0e-6;
  610     return sample_length - time_length;
  611 }
  612 
  613 void Synchronizer::NewSegment(SharedData* lock) {
  614     FlushBuffers(lock);
  615     if(lock->m_segment_video_started && lock->m_segment_audio_started) {
  616         int64_t segment_start_time, segment_stop_time;
  617         GetSegmentStartStop(lock, &segment_start_time, &segment_stop_time);
  618         lock->m_time_offset += std::max((int64_t) 0, segment_stop_time - segment_start_time);
  619     }
  620     lock->m_video_buffer.clear();
  621     lock->m_audio_buffer.Clear();
  622     InitSegment(lock);
  623 }
  624 
  625 void Synchronizer::InitSegment(SharedData* lock) {
  626     lock->m_segment_video_started = !m_output_format->m_video_enabled;
  627     lock->m_segment_audio_started = !m_output_format->m_audio_enabled;
  628     lock->m_segment_video_start_time = AV_NOPTS_VALUE;
  629     lock->m_segment_audio_start_time = AV_NOPTS_VALUE;
  630     lock->m_segment_video_stop_time = AV_NOPTS_VALUE;
  631     lock->m_segment_audio_stop_time = AV_NOPTS_VALUE;
  632     lock->m_segment_audio_can_drop = true;
  633     lock->m_segment_audio_samples_read = 0;
  634     lock->m_segment_video_accumulated_delay = 0;
  635 }
  636 
  637 int64_t Synchronizer::GetTotalTime(Synchronizer::SharedData* lock) {
  638     if(lock->m_segment_video_started && lock->m_segment_audio_started) {
  639         int64_t segment_start_time, segment_stop_time;
  640         GetSegmentStartStop(lock, &segment_start_time, &segment_stop_time);
  641         return lock->m_time_offset + std::max((int64_t) 0, segment_stop_time - segment_start_time);
  642     } else {
  643         return lock->m_time_offset;
  644     }
  645 }
  646 
  647 void Synchronizer::GetSegmentStartStop(SharedData* lock, int64_t* segment_start_time, int64_t* segment_stop_time) {
  648     if(!m_output_format->m_audio_enabled) {
  649         *segment_start_time = lock->m_segment_video_start_time;
  650         *segment_stop_time = lock->m_segment_video_stop_time;
  651     } else if(!m_output_format->m_video_enabled) {
  652         *segment_start_time = lock->m_segment_audio_start_time;
  653         *segment_stop_time = lock->m_segment_audio_stop_time;
  654     } else {
  655         *segment_start_time = std::max(lock->m_segment_video_start_time, lock->m_segment_audio_start_time);
  656         *segment_stop_time = std::min(lock->m_segment_video_stop_time, lock->m_segment_audio_stop_time);
  657     }
  658 }
  659 
  660 void Synchronizer::FlushBuffers(SharedData* lock) {
  661     if(!lock->m_segment_video_started || !lock->m_segment_audio_started)
  662         return;
  663 
  664     int64_t segment_start_time, segment_stop_time;
  665     GetSegmentStartStop(lock, &segment_start_time, &segment_stop_time);
  666 
  667     // flush video
  668     if(m_output_format->m_video_enabled)
  669         FlushVideoBuffer(lock, segment_start_time, segment_stop_time);
  670 
  671     // flush audio
  672     if(m_output_format->m_audio_enabled)
  673         FlushAudioBuffer(lock, segment_start_time, segment_stop_time);
  674 
  675 }
  676 
void Synchronizer::FlushVideoBuffer(Synchronizer::SharedData* lock, int64_t segment_start_time, int64_t segment_stop_time) {

    // Sometimes long delays between video frames can occur, e.g. when a game is showing a loading screen.
    // Not all codecs/players can handle that. It's also a problem for streaming. To fix this, long delays should be avoided by
    // duplicating the previous frame a few times when needed. Whenever a video frame is sent to the encoder, it is also copied,
    // with reference counting for the actual image to minimize overhead. When there is a gap, duplicate frames are inserted.
    // Duplicate frames are always inserted with a timestamp in the past, because we don't want to drop a real frame if it is captured
    // right after the duplicate was inserted. MAX_INPUT_LATENCY simulates the latency from the capturing of a frame to the synchronizer,
    // i.e. any new frame is assumed to have a timestamp higher than the current time minus MAX_INPUT_LATENCY. The duplicate
    // frame will have a timestamp that's one frame earlier than that time, so it will never interfere with the real frame.
    // There are two situations where duplicate frames can be inserted:
    // (1) The queue is not empty, but there is a gap between frames that is too large.
    // (2) The queue is empty and the last timestamp is too long ago (relative to the end of the video segment).
    // It is perfectly possible that *both* happen, each possibly multiple times, in just one function call.

    // pts (in frames) of the segment end, and the wall-clock duration of a single output frame (in microseconds)
    int64_t segment_stop_video_pts = (lock->m_time_offset + (segment_stop_time - segment_start_time)) * (int64_t) m_output_format->m_video_frame_rate / (int64_t) 1000000;
    int64_t delay_time_per_frame = 1000000 / m_output_format->m_video_frame_rate;
    for( ; ; ) {

        // get/predict the timestamp of the next frame
        // (if the buffer is empty, assume a frame one frame-interval before the segment video stop time)
        int64_t next_timestamp = (lock->m_video_buffer.empty())? lock->m_segment_video_stop_time - (int64_t) (1000000 / m_output_format->m_video_frame_rate) : lock->m_video_buffer.front()->GetFrame()->pts;
        int64_t next_pts = (lock->m_time_offset + (next_timestamp - segment_start_time)) * (int64_t) m_output_format->m_video_frame_rate / (int64_t) 1000000;

        // if the frame is too late, decrease the pts by one to avoid gaps
        if(next_pts > lock->m_video_pts)
            --next_pts;

        // insert delays if needed, up to the segment end
        // (each skipped pts slot consumes one frame-interval worth of accumulated encoder delay)
        while(lock->m_segment_video_accumulated_delay >= delay_time_per_frame && lock->m_video_pts < segment_stop_video_pts) {
            lock->m_segment_video_accumulated_delay -= delay_time_per_frame;
            lock->m_video_pts += 1;
            //Logger::LogInfo("[Synchronizer::FlushVideoBuffer] Delay [" + QString::number(lock->m_video_pts - 1) + "] acc " + QString::number(lock->m_segment_video_accumulated_delay) + ".");
        }

        // insert duplicate frames if needed, up to either the next frame or the segment end
        // (only possible once at least one real frame has been sent, so there is image data to duplicate)
        if(lock->m_last_video_frame_data != NULL) {
            while(lock->m_video_pts + m_max_frames_skipped < std::min(next_pts, segment_stop_video_pts)) {

                // create duplicate frame
                // (shares the image data of the last real frame via reference counting)
                std::unique_ptr<AVFrameWrapper> duplicate_frame = CreateVideoFrame(m_output_format->m_video_width, m_output_format->m_video_height, m_output_format->m_video_pixel_format, lock->m_last_video_frame_data);
                duplicate_frame->GetFrame()->pts = lock->m_video_pts + m_max_frames_skipped;

                // add new block to sync diagram
                if(m_sync_diagram != NULL) {
                    double t = (double) duplicate_frame->GetFrame()->pts / (double) m_output_format->m_video_frame_rate;
                    m_sync_diagram->AddBlock(2, t, t + 1.0 / (double) m_output_format->m_video_frame_rate, QColor(255, 196, 0));
                }

                // send the frame to the encoder
                // (the skipped pts range also consumes accumulated delay, clamped at zero)
                lock->m_segment_video_accumulated_delay = std::max((int64_t) 0, lock->m_segment_video_accumulated_delay - m_max_frames_skipped * delay_time_per_frame);
                lock->m_video_pts = duplicate_frame->GetFrame()->pts + 1;
                //Logger::LogInfo("[Synchronizer::FlushVideoBuffer] Encoded video frame [" + QString::number(duplicate_frame->GetFrame()->pts) + "] (duplicate) acc " + QString::number(lock->m_segment_video_accumulated_delay) + ".");
                m_output_manager->AddVideoFrame(std::move(duplicate_frame));
                lock->m_segment_video_accumulated_delay += m_output_manager->GetVideoFrameDelay();

            }
        }

        // if there are no frames, or they are beyond the segment end, stop
        if(lock->m_video_buffer.empty() || next_pts >= segment_stop_video_pts)
            break;

        // get the frame
        // (remember its image data so future gaps can be filled with duplicates)
        std::unique_ptr<AVFrameWrapper> frame = std::move(lock->m_video_buffer.front());
        lock->m_video_buffer.pop_front();
        frame->GetFrame()->pts = next_pts;
        lock->m_last_video_frame_data = frame->GetFrameData();

        // if the frame is too early, drop it
        if(frame->GetFrame()->pts < lock->m_video_pts) {
            //Logger::LogInfo("[Synchronizer::FlushVideoBuffer] Dropped video frame [" + QString::number(frame->GetFrame()->pts) + "] acc " + QString::number(lock->m_segment_video_accumulated_delay) + ".");
            continue;
        }

        // if this is the first video frame, always set the pts to zero
        if(lock->m_video_pts == 0)
            frame->GetFrame()->pts = 0;

        // add new block to sync diagram
        if(m_sync_diagram != NULL) {
            double t = (double) frame->GetFrame()->pts / (double) m_output_format->m_video_frame_rate;
            m_sync_diagram->AddBlock(2, t, t + 1.0 / (double) m_output_format->m_video_frame_rate, QColor(255, 0, 0));
        }

        // send the frame to the encoder
        // (any pts gap covered by this frame consumes accumulated delay, clamped at zero)
        lock->m_segment_video_accumulated_delay = std::max((int64_t) 0, lock->m_segment_video_accumulated_delay - (frame->GetFrame()->pts - lock->m_video_pts) * delay_time_per_frame);
        lock->m_video_pts = frame->GetFrame()->pts + 1;
        //Logger::LogInfo("[Synchronizer::FlushBuffers] Encoded video frame [" + QString::number(frame->GetFrame()->pts) + "].");
        m_output_manager->AddVideoFrame(std::move(frame));
        lock->m_segment_video_accumulated_delay += m_output_manager->GetVideoFrameDelay();

    }

}
  771 
void Synchronizer::FlushAudioBuffer(Synchronizer::SharedData* lock, int64_t segment_start_time, int64_t segment_stop_time) {

    // maximum number of samples that may still be read in this segment, derived from the segment duration
    double sample_length = (double) (segment_stop_time - lock->m_segment_audio_start_time) * 1.0e-6;
    int64_t samples_max = (int64_t) ceil(sample_length * (double) m_output_format->m_audio_sample_rate) - lock->m_segment_audio_samples_read;
    if(lock->m_audio_buffer.GetSize() > 0) {

        // Normally, the correct way to calculate the position of the first sample would be:
        //     int64_t timestamp = lock->m_segment_audio_start_time + (int64_t) round((double) lock->m_segment_audio_samples_read / (double) m_audio_sample_rate * 1.0e6);
        //     int64_t pos = (int64_t) round((double) (lock->m_time_offset + (timestamp - segment_start_time)) * 1.0e-6 * (double) m_audio_sample_rate);
        // Simplified:
        //     int64_t pos = (int64_t) round((double) (lock->m_time_offset + (lock->m_segment_audio_start_time - segment_start_time)) * 1.0e-6 * (double) m_audio_sample_rate)
        //                   + lock->m_segment_audio_samples_read;
        // The first part of the expression is constant, so it only has to be calculated at the start of the segment. After that the increase in position is always
        // equal to the number of samples written. Samples are only dropped at the start of the segment, so actually
        // the position doesn't have to be calculated anymore after that, since it is assumed to be equal to lock->m_audio_samples.

        // dropping is only allowed before any sample of this segment has been sent to the encoder
        if(lock->m_segment_audio_can_drop) {

            // calculate the offset of the first sample
            int64_t pos = (int64_t) round((double) (lock->m_time_offset + (lock->m_segment_audio_start_time - segment_start_time)) * 1.0e-6 * (double) m_output_format->m_audio_sample_rate)
                          + lock->m_segment_audio_samples_read;

            // drop samples that are too early
            // (i.e. samples whose position falls before the stream's current sample count)
            if(pos < lock->m_audio_samples) {
                int64_t n = std::min(lock->m_audio_samples - pos, (int64_t) lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels);
                lock->m_audio_buffer.Pop(n * m_output_format->m_audio_channels);
                lock->m_segment_audio_samples_read += n;
            }

        }

        // how many samples can actually be flushed now: limited by both the segment end and the buffer content
        int64_t samples_left = std::min(samples_max, (int64_t) lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels);

        // add new block to sync diagram
        if(m_sync_diagram != NULL && samples_left > 0) {
            double t = (double) lock->m_audio_samples / (double) m_output_format->m_audio_sample_rate;
            m_sync_diagram->AddBlock(3, t, t + (double) samples_left / (double) m_output_format->m_audio_sample_rate, QColor(0, 255, 0));
        }

        // send the samples to the encoder
        // (samples are accumulated in a partial frame until a full encoder frame is available)
        while(samples_left > 0) {

            // once samples are committed, dropping at the segment start is no longer allowed
            lock->m_segment_audio_can_drop = false;

            // copy samples until either the partial frame is full or there are no samples left
            //TODO// do direct copy/conversion to new audio frame?
            int64_t n = std::min((int64_t) (m_output_format->m_audio_frame_size - lock->m_partial_audio_frame_samples), samples_left);
            lock->m_audio_buffer.Pop(lock->m_partial_audio_frame.GetData() + lock->m_partial_audio_frame_samples * m_output_format->m_audio_channels, n * m_output_format->m_audio_channels);
            lock->m_segment_audio_samples_read += n;
            lock->m_partial_audio_frame_samples += n;
            lock->m_audio_samples += n;
            samples_left -= n;

            // is the partial frame full?
            if(lock->m_partial_audio_frame_samples == m_output_format->m_audio_frame_size) {

                // allocate a frame
                // (planar sample formats need one plane per channel, interleaved formats need just one)
#if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
                unsigned int planes = (m_output_format->m_audio_sample_format == AV_SAMPLE_FMT_S16P ||
                                       m_output_format->m_audio_sample_format == AV_SAMPLE_FMT_FLTP)? m_output_format->m_audio_channels : 1;
#else
                unsigned int planes = 1;
#endif
                std::unique_ptr<AVFrameWrapper> audio_frame = CreateAudioFrame(m_output_format->m_audio_channels, m_output_format->m_audio_sample_rate,
                                                                               m_output_format->m_audio_frame_size, planes, m_output_format->m_audio_sample_format);
                audio_frame->GetFrame()->pts = lock->m_audio_samples;

                // copy/convert the samples
                // (the partial frame always holds interleaved float samples; convert to the encoder's sample format)
                switch(m_output_format->m_audio_sample_format) {
                    case AV_SAMPLE_FMT_S16: {
                        // interleaved float -> interleaved 16-bit signed integer
                        float *data_in = (float*) lock->m_partial_audio_frame.GetData();
                        int16_t *data_out = (int16_t*) audio_frame->GetFrame()->data[0];
                        SampleCopy(m_output_format->m_audio_frame_size * m_output_format->m_audio_channels, data_in, 1, data_out, 1);
                        break;
                    }
                    case AV_SAMPLE_FMT_FLT: {
                        // interleaved float -> interleaved float, a plain copy is enough
                        float *data_in = (float*) lock->m_partial_audio_frame.GetData();
                        float *data_out = (float*) audio_frame->GetFrame()->data[0];
                        memcpy(data_out, data_in, m_output_format->m_audio_frame_size * m_output_format->m_audio_channels * sizeof(float));
                        break;
                    }
#if SSR_USE_AVUTIL_PLANAR_SAMPLE_FMT
                    case AV_SAMPLE_FMT_S16P: {
                        // interleaved float -> planar 16-bit signed integer (deinterleave with stride 'planes')
                        for(unsigned int p = 0; p < planes; ++p) {
                            float *data_in = (float*) lock->m_partial_audio_frame.GetData() + p;
                            int16_t *data_out = (int16_t*) audio_frame->GetFrame()->data[p];
                            SampleCopy(m_output_format->m_audio_frame_size, data_in, planes, data_out, 1);
                        }
                        break;
                    }
                    case AV_SAMPLE_FMT_FLTP: {
                        // interleaved float -> planar float (deinterleave with stride 'planes')
                        for(unsigned int p = 0; p < planes; ++p) {
                            float *data_in = (float*) lock->m_partial_audio_frame.GetData() + p;
                            float *data_out = (float*) audio_frame->GetFrame()->data[p];
                            SampleCopy(m_output_format->m_audio_frame_size, data_in, planes, data_out, 1);
                        }
                        break;
                    }
#endif
                    default: {
                        // unsupported sample format: this should have been rejected earlier
                        assert(false);
                        break;
                    }
                }
                lock->m_partial_audio_frame_samples = 0;

                //Logger::LogInfo("[Synchronizer::FlushAudioBuffer] Encoded audio frame [" + QString::number(lock->m_partial_audio_frame->pts) + "].");
                m_output_manager->AddAudioFrame(std::move(audio_frame));
            }

        }

    }

}
  887 
  888 void Synchronizer::SynchronizerThread() {
  889     try {
  890 
  891         Logger::LogInfo("[Synchronizer::SynchronizerThread] " + Logger::tr("Synchronizer thread started."));
  892 
  893         while(!m_should_stop) {
  894 
  895             {
  896                 SharedLock lock(&m_shared_data);
  897                 FlushBuffers(lock.get());
  898                 if(m_sync_diagram != NULL) {
  899                     double time_in = (double) hrt_time_micro() * 1.0e-6;
  900                     double time_out = (double) GetTotalTime(lock.get()) * 1.0e-6;
  901                     m_sync_diagram->SetCurrentTime(0, time_in);
  902                     m_sync_diagram->SetCurrentTime(1, time_in);
  903                     m_sync_diagram->SetCurrentTime(2, time_out);
  904                     m_sync_diagram->SetCurrentTime(3, time_out);
  905                     m_sync_diagram->Update();
  906                 }
  907             }
  908 
  909             usleep(20000);
  910 
  911         }
  912 
  913         Logger::LogInfo("[Synchronizer::SynchronizerThread] " + Logger::tr("Synchronizer thread stopped."));
  914 
  915     } catch(const std::exception& e) {
  916         m_error_occurred = true;
  917         Logger::LogError("[Synchronizer::SynchronizerThread] " + Logger::tr("Exception '%1' in synchronizer thread.").arg(e.what()));
  918     } catch(...) {
  919         m_error_occurred = true;
  920         Logger::LogError("[Synchronizer::SynchronizerThread] " + Logger::tr("Unknown exception in synchronizer thread."));
  921     }
  922 }