Commit 1815237d authored by hubbe@chromium.org

Cast: Print out AV sync in cast receiver if playing test video

Test videos can be found in chrome/test/data/extensions/api_test/cast_streaming/test_video_*fps.webm

Review URL: https://codereview.chromium.org/257703002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@266300 0039d316-1c4b-4281-b951-d872f2087c98
parent fb1d720b
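The measurement works because both test streams carry a frame number: the test video has a barcode painted into each frame (decoded with DecodeBarcode below), and the audio has the number encoded into its samples (decoded with DecodeTimestamp). The receiver records the playout time it assigns to each decoded number on both streams and averages the audio-minus-video difference. The following standalone sketch illustrates only that averaging step; it is not part of the patch, and it uses std::chrono and invented names (ReportAVSync) in place of base::TimeTicks and the NaivePlayer members shown in the diff.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <map>

using Clock = std::chrono::steady_clock;

// Average the (audio - video) playout offset over every frame number that was
// decoded on both streams. Positive output means audio plays out later than
// the video frame carrying the same number.
void ReportAVSync(const std::map<uint16_t, Clock::time_point>& video_times,
                  const std::map<uint16_t, Clock::time_point>& audio_times) {
  Clock::duration total = Clock::duration::zero();
  int64_t matches = 0;
  for (const auto& video : video_times) {
    const auto audio = audio_times.find(video.first);
    if (audio == audio_times.end())
      continue;  // Frame number only seen on one of the two streams.
    total += audio->second - video.second;
    ++matches;
  }
  if (matches == 0)
    return;
  const auto avg =
      std::chrono::duration_cast<std::chrono::milliseconds>(total / matches);
  std::cout << "Audio behind by: " << avg.count() << "ms" << std::endl;
}

int main() {
  std::map<uint16_t, Clock::time_point> video_times;
  std::map<uint16_t, Clock::time_point> audio_times;
  const Clock::time_point start = Clock::now();
  // Fake a stream where audio consistently lags video by 40 ms.
  for (uint16_t frame = 0; frame < 32; ++frame) {
    video_times[frame] = start + frame * std::chrono::milliseconds(33);
    audio_times[frame] = video_times[frame] + std::chrono::milliseconds(40);
  }
  ReportAVSync(video_times, audio_times);  // Prints "Audio behind by: 40ms".
  return 0;
}
```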
@@ -7,6 +7,7 @@
#include <cstdarg>
#include <cstdio>
#include <deque>
+#include <map>
#include <string>
#include <utility>
@@ -32,6 +33,8 @@
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
#include "media/cast/logging/logging_defines.h"
+#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/test/utility/barcode.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/in_process_receiver.h"
#include "media/cast/test/utility/input_builder.h"
@@ -270,6 +273,13 @@ class NaivePlayer : public InProcessReceiver,
        << "Video: Discontinuity in received frames.";
    video_playout_queue_.push_back(std::make_pair(playout_time, video_frame));
    ScheduleVideoPlayout();
+    uint16 frame_no;
+    if (media::cast::test::DecodeBarcode(video_frame, &frame_no)) {
+      video_play_times_.insert(
+          std::pair<uint16, base::TimeTicks>(frame_no, playout_time));
+    } else {
+      VLOG(2) << "Barcode decode failed!";
+    }
  }

  virtual void OnAudioFrame(scoped_ptr<AudioBus> audio_frame,
@@ -279,6 +289,23 @@ class NaivePlayer : public InProcessReceiver,
    LOG_IF(WARNING, !is_continuous)
        << "Audio: Discontinuity in received frames.";
    base::AutoLock auto_lock(audio_lock_);
+    uint16 frame_no;
+    if (media::cast::DecodeTimestamp(audio_frame->channel(0),
+                                     audio_frame->frames(),
+                                     &frame_no)) {
+      // Since there are lots of audio packets with the same frame_no,
+      // we really want to make sure that we get the playout_time from
+      // the first one. If is_continuous is false, then it's possible
+      // that we already missed the first one.
+      if (is_continuous && frame_no == last_audio_frame_no_ + 1) {
+        audio_play_times_.insert(
+            std::pair<uint16, base::TimeTicks>(frame_no, playout_time));
+      }
+      last_audio_frame_no_ = frame_no;
+    } else {
+      VLOG(2) << "Audio decode failed!";
+      last_audio_frame_no_ = -2;
+    }
    audio_playout_queue_.push_back(
        std::make_pair(playout_time, audio_frame.release()));
  }
@@ -396,6 +423,7 @@ class NaivePlayer : public InProcessReceiver,
#endif  // OS_LINUX
    }
    ScheduleVideoPlayout();
+    CheckAVSync();
  }

  scoped_refptr<VideoFrame> PopOneVideoFrame(bool is_being_skipped) {
@@ -434,6 +462,37 @@ class NaivePlayer : public InProcessReceiver,
    return ret.Pass();
  }
+  void CheckAVSync() {
+    if (video_play_times_.size() > 30 &&
+        audio_play_times_.size() > 30) {
+      size_t num_events = 0;
+      base::TimeDelta delta;
+      std::map<uint16, base::TimeTicks>::iterator audio_iter, video_iter;
+      for (video_iter = video_play_times_.begin();
+           video_iter != video_play_times_.end();
+           ++video_iter) {
+        audio_iter = audio_play_times_.find(video_iter->first);
+        if (audio_iter != audio_play_times_.end()) {
+          num_events++;
+          // Positive values mean audio is running behind video.
+          delta += audio_iter->second - video_iter->second;
+        }
+      }
+      if (num_events > 30) {
+        VLOG(0) << "Audio behind by: "
+                << (delta / num_events).InMilliseconds()
+                << "ms";
+        video_play_times_.clear();
+        audio_play_times_.clear();
+      }
+    } else if (video_play_times_.size() + audio_play_times_.size() > 500) {
+      // We are decoding audio or video timestamps, but not both; clear them out.
+      video_play_times_.clear();
+      audio_play_times_.clear();
+    }
+  }
  // Frames in the queue older than this (relative to NowTicks()) will be
  // dropped (i.e., playback is falling behind).
  const base::TimeDelta max_frame_age_;
@@ -461,6 +520,10 @@ class NaivePlayer : public InProcessReceiver,
  // These must only be used on the audio thread calling OnMoreData().
  scoped_ptr<AudioBus> currently_playing_audio_frame_;
  int currently_playing_audio_frame_start_;
+  std::map<uint16, base::TimeTicks> audio_play_times_;
+  std::map<uint16, base::TimeTicks> video_play_times_;
+  int32 last_audio_frame_no_;
};

}  // namespace cast
@@ -79,19 +79,11 @@ bool EncodeBarcode(const std::vector<bool>& bits,
  return true;
}

-// Note that "output" is assumed to be the right size already. This
-// could be inferred from the data, but the decoding is more robust
-// if we can assume that we know how many bits we want.
-bool DecodeBarcode(const scoped_refptr<VideoFrame>& frame,
-                   std::vector<bool>* output) {
-  DCHECK(frame->format() == VideoFrame::YV12 ||
-         frame->format() == VideoFrame::YV16 ||
-         frame->format() == VideoFrame::I420 ||
-         frame->format() == VideoFrame::YV12J);
-  int min_row = std::max(0, frame->rows(VideoFrame::kYPlane) / 2 - 10);
-  int max_row = std::min(frame->rows(VideoFrame::kYPlane),
-                         frame->rows(VideoFrame::kYPlane) / 2 + 10);
+namespace {
+bool DecodeBarCodeRows(const scoped_refptr<VideoFrame>& frame,
+                       std::vector<bool>* output,
+                       int min_row,
+                       int max_row) {
  // Do a basic run-length encoding
  std::deque<int> runs;
  bool is_black = true;
@@ -151,6 +143,34 @@ bool DecodeBarcode(const scoped_refptr<VideoFrame>& frame,
  return false;
}
+}  // namespace
+
+// Note that "output" is assumed to be the right size already. This
+// could be inferred from the data, but the decoding is more robust
+// if we can assume that we know how many bits we want.
+bool DecodeBarcode(const scoped_refptr<VideoFrame>& frame,
+                   std::vector<bool>* output) {
+  DCHECK(frame->format() == VideoFrame::YV12 ||
+         frame->format() == VideoFrame::YV16 ||
+         frame->format() == VideoFrame::I420 ||
+         frame->format() == VideoFrame::YV12J);
+  int rows = frame->rows(VideoFrame::kYPlane);
+  // Middle 10 lines
+  if (DecodeBarCodeRows(frame,
+                        output,
+                        std::max(0, rows / 2 - 5),
+                        std::min(rows, rows / 2 + 5))) {
+    return true;
+  }
+  // Top 5 lines
+  if (DecodeBarCodeRows(frame, output, 0, std::min(5, rows))) {
+    return true;
+  }
+  return false;
+}
}  // namespace test
}  // namespace cast
}  // namespace media
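For reference, the std::vector<bool> overload of DecodeBarcode shown above leaves it to the caller to size the output to the number of bits it wants back, rather than inferring the width from the frame. A hedged usage sketch follows; the 16-bit width, the helper name, and the assumption that this overload is declared in barcode.h are illustrative only and not taken from this change.

```cpp
#include <vector>

#include "media/base/video_frame.h"
#include "media/cast/test/utility/barcode.h"

// Sketch only: tries to read a 16-bit barcode out of |frame|. DecodeBarcode
// decodes exactly as many bits as the caller asked for, so |bits| is sized
// up front.
bool ReadBarcodeBits(const scoped_refptr<media::VideoFrame>& frame,
                     std::vector<bool>* bits) {
  bits->resize(16);
  return media::cast::test::DecodeBarcode(frame, bits);
}
```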