Commit c09161c6 authored by mikhal@chromium.org's avatar mikhal@chromium.org

Cast: Refactor Audio Receiver to Clang format

This CL is pure style-targeted refactoring.
No functional changes were made.

BUG=339176

Review URL: https://codereview.chromium.org/149703002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@247908 0039d316-1c4b-4281-b951-d872f2087c98
parent 82439ec2
......@@ -17,8 +17,11 @@ AudioDecoder::AudioDecoder(scoped_refptr<CastEnvironment> cast_environment,
: cast_environment_(cast_environment),
audio_decoder_(webrtc::AudioCodingModule::Create(0)),
cast_message_builder_(cast_environment->Clock(),
incoming_payload_feedback, &frame_id_map_, audio_config.incoming_ssrc,
true, 0),
incoming_payload_feedback,
&frame_id_map_,
audio_config.incoming_ssrc,
true,
0),
have_received_packets_(false),
last_played_out_timestamp_(0) {
audio_decoder_->InitializeReceiver();
......@@ -68,7 +71,8 @@ bool AudioDecoder::GetRawAudioFrame(int number_of_10ms_blocks,
bool have_received_packets = have_received_packets_;
lock_.Release();
if (!have_received_packets) return false;
if (!have_received_packets)
return false;
audio_frame->samples.clear();
......@@ -110,15 +114,16 @@ void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
const RtpCastHeader& rtp_header) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK_LE(payload_size, kMaxIpPacketSize);
audio_decoder_->IncomingPacket(payload_data, static_cast<int32>(payload_size),
rtp_header.webrtc);
audio_decoder_->IncomingPacket(
payload_data, static_cast<int32>(payload_size), rtp_header.webrtc);
lock_.Acquire();
have_received_packets_ = true;
uint32 last_played_out_timestamp = last_played_out_timestamp_;
lock_.Release();
PacketType packet_type = frame_id_map_.InsertPacket(rtp_header);
if (packet_type != kNewPacketCompletingFrame) return;
if (packet_type != kNewPacketCompletingFrame)
return;
cast_message_builder_.CompleteFrameReceived(rtp_header.frame_id,
rtp_header.is_key_frame);
......@@ -126,7 +131,8 @@ void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
frame_id_rtp_timestamp_map_[rtp_header.frame_id] =
rtp_header.webrtc.header.timestamp;
if (last_played_out_timestamp == 0) return; // Nothing is played out yet.
if (last_played_out_timestamp == 0)
return; // Nothing is played out yet.
uint32 latest_frame_id_to_remove = 0;
bool frame_to_remove = false;
......@@ -141,7 +147,8 @@ void AudioDecoder::IncomingParsedRtpPacket(const uint8* payload_data,
frame_id_rtp_timestamp_map_.erase(it);
it = frame_id_rtp_timestamp_map_.begin();
}
if (!frame_to_remove) return;
if (!frame_to_remove)
return;
frame_id_map_.RemoveOldFrames(latest_frame_id_to_remove);
}
......
......@@ -51,7 +51,7 @@ class AudioDecoder {
private:
scoped_refptr<CastEnvironment> cast_environment_;
// The webrtc AudioCodingModule is threadsafe.
// The webrtc AudioCodingModule is thread safe.
scoped_ptr<webrtc::AudioCodingModule> audio_decoder_;
FrameIdMap frame_id_map_;
......
......@@ -30,10 +30,15 @@ class AudioDecoderTest : public ::testing::Test {
testing_clock_ = new base::SimpleTestTickClock();
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(1234));
task_runner_ = new test::FakeTaskRunner(testing_clock_);
cast_environment_ = new CastEnvironment(
scoped_ptr<base::TickClock>(testing_clock_).Pass(), task_runner_,
task_runner_, task_runner_, task_runner_, task_runner_, task_runner_,
GetDefaultCastReceiverLoggingConfig());
cast_environment_ =
new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
task_runner_,
task_runner_,
task_runner_,
task_runner_,
task_runner_,
task_runner_,
GetDefaultCastReceiverLoggingConfig());
}
virtual ~AudioDecoderTest() {}
......@@ -48,6 +53,8 @@ class AudioDecoderTest : public ::testing::Test {
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
scoped_ptr<AudioDecoder> audio_decoder_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderTest);
};
TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
......@@ -75,21 +82,17 @@ TEST_F(AudioDecoderTest, Pcm16MonoNoResampleOnePacket) {
PcmAudioFrame audio_frame;
uint32 rtp_timestamp;
EXPECT_FALSE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
EXPECT_FALSE(audio_decoder_->GetRawAudioFrame(
number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
size_t payload_size = payload.size() * sizeof(int16);
audio_decoder_->IncomingParsedRtpPacket(payload_data,
payload_size, rtp_header);
audio_decoder_->IncomingParsedRtpPacket(
payload_data, payload_size, rtp_header);
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
EXPECT_EQ(1, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
......@@ -125,24 +128,22 @@ TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
size_t payload_size = payload.size() * sizeof(int16);
audio_decoder_->IncomingParsedRtpPacket(payload_data,
payload_size, rtp_header);
audio_decoder_->IncomingParsedRtpPacket(
payload_data, payload_size, rtp_header);
int number_of_10ms_blocks = 2;
int desired_frequency = 16000;
PcmAudioFrame audio_frame;
uint32 rtp_timestamp;
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
EXPECT_EQ(2, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
// First 10 samples per channel are 0 from NetEq.
for (size_t i = 10 * audio_config.channels; i < audio_frame.samples.size();
++i) {
++i) {
EXPECT_EQ(0x3412, audio_frame.samples[i]);
}
......@@ -150,13 +151,11 @@ TEST_F(AudioDecoderTest, Pcm16StereoNoResampleTwoPackets) {
rtp_header.webrtc.header.sequenceNumber++;
rtp_header.webrtc.header.timestamp += (audio_config.frequency / 100) * 2 * 2;
audio_decoder_->IncomingParsedRtpPacket(payload_data,
payload_size, rtp_header);
audio_decoder_->IncomingParsedRtpPacket(
payload_data, payload_size, rtp_header);
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
EXPECT_EQ(2, audio_frame.channels);
EXPECT_EQ(16000, audio_frame.frequency);
EXPECT_EQ(640ul, audio_frame.samples.size());
......@@ -194,18 +193,16 @@ TEST_F(AudioDecoderTest, Pcm16Resample) {
uint8* payload_data = reinterpret_cast<uint8*>(&payload[0]);
size_t payload_size = payload.size() * sizeof(int16);
audio_decoder_->IncomingParsedRtpPacket(payload_data,
payload_size, rtp_header);
audio_decoder_->IncomingParsedRtpPacket(
payload_data, payload_size, rtp_header);
int number_of_10ms_blocks = 2;
int desired_frequency = 48000;
PcmAudioFrame audio_frame;
uint32 rtp_timestamp;
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(number_of_10ms_blocks,
desired_frequency,
&audio_frame,
&rtp_timestamp));
EXPECT_TRUE(audio_decoder_->GetRawAudioFrame(
number_of_10ms_blocks, desired_frequency, &audio_frame, &rtp_timestamp));
EXPECT_EQ(2, audio_frame.channels);
EXPECT_EQ(48000, audio_frame.frequency);
......@@ -213,9 +210,10 @@ TEST_F(AudioDecoderTest, Pcm16Resample) {
int count = 0;
// Resampling makes the variance worse.
for (size_t i = 100 * audio_config.channels; i < audio_frame.samples.size();
++i) {
++i) {
EXPECT_NEAR(0x3412, audio_frame.samples[i], 400);
if (0x3412 == audio_frame.samples[i]) count++;
if (0x3412 == audio_frame.samples[i])
count++;
}
}
......
This diff is collapsed.
......@@ -16,7 +16,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
#include "media/cast/rtcp/rtcp.h" // RtcpCastMessage
#include "media/cast/rtp_receiver/rtp_receiver_defines.h" // RtpCastHeader
namespace crypto {
......@@ -69,6 +69,7 @@ class AudioReceiver : public base::NonThreadSafe,
void IncomingParsedRtpPacket(const uint8* payload_data,
size_t payload_size,
const RtpCastHeader& rtp_header);
private:
friend class LocalRtpAudioData;
friend class LocalRtpAudioFeedback;
......@@ -90,7 +91,8 @@ class AudioReceiver : public base::NonThreadSafe,
int desired_frequency,
const AudioFrameDecodedCallback callback);
void ReturnDecodedFrameWithPlayoutDelay(
scoped_ptr<PcmAudioFrame> audio_frame, uint32 rtp_timestamp,
scoped_ptr<PcmAudioFrame> audio_frame,
uint32 rtp_timestamp,
const AudioFrameDecodedCallback callback);
// Return the playout time based on the current time and rtp timestamp.
......
......@@ -20,11 +20,10 @@ namespace cast {
static const int64 kStartMillisecond = GG_INT64_C(12345678900000);
namespace {
class TestAudioEncoderCallback :
public base::RefCountedThreadSafe<TestAudioEncoderCallback> {
class TestAudioEncoderCallback
: public base::RefCountedThreadSafe<TestAudioEncoderCallback> {
public:
TestAudioEncoderCallback()
: num_called_(0) {}
TestAudioEncoderCallback() : num_called_(0) {}
void SetExpectedResult(uint8 expected_frame_id,
const base::TimeTicks& expected_playout_time) {
......@@ -41,7 +40,7 @@ class TestAudioEncoderCallback :
num_called_++;
}
int number_times_called() const { return num_called_;}
int number_times_called() const { return num_called_; }
protected:
virtual ~TestAudioEncoderCallback() {}
......@@ -52,6 +51,8 @@ class TestAudioEncoderCallback :
int num_called_;
uint8 expected_frame_id_;
base::TimeTicks expected_playout_time_;
DISALLOW_COPY_AND_ASSIGN(TestAudioEncoderCallback);
};
} // namespace
......@@ -79,17 +80,22 @@ class AudioReceiverTest : public ::testing::Test {
testing_clock_->Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
task_runner_ = new test::FakeTaskRunner(testing_clock_);
cast_environment_ = new CastEnvironment(
scoped_ptr<base::TickClock>(testing_clock_).Pass(), task_runner_,
task_runner_, task_runner_, task_runner_, task_runner_,
task_runner_, GetDefaultCastReceiverLoggingConfig());
cast_environment_ =
new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
task_runner_,
task_runner_,
task_runner_,
task_runner_,
task_runner_,
task_runner_,
GetDefaultCastReceiverLoggingConfig());
test_audio_encoder_callback_ = new TestAudioEncoderCallback();
}
void Configure(bool use_external_decoder) {
audio_config_.use_external_decoder = use_external_decoder;
receiver_.reset(new PeerAudioReceiver(cast_environment_, audio_config_,
&mock_transport_));
receiver_.reset(new PeerAudioReceiver(
cast_environment_, audio_config_, &mock_transport_));
}
virtual ~AudioReceiverTest() {}
......@@ -110,7 +116,7 @@ class AudioReceiverTest : public ::testing::Test {
AudioReceiverConfig audio_config_;
std::vector<uint8> payload_;
RtpCastHeader rtp_header_;
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
transport::MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeTaskRunner> task_runner_;
scoped_ptr<PeerAudioReceiver> receiver_;
......@@ -122,12 +128,12 @@ TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
Configure(true);
EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).Times(1);
receiver_->IncomingParsedRtpPacket(payload_.data(),
payload_.size(), rtp_header_);
receiver_->IncomingParsedRtpPacket(
payload_.data(), payload_.size(), rtp_header_);
transport::EncodedAudioFrame audio_frame;
base::TimeTicks playout_time;
test_audio_encoder_callback_->SetExpectedResult(
0, testing_clock_->NowTicks());
test_audio_encoder_callback_->SetExpectedResult(0,
testing_clock_->NowTicks());
AudioFrameEncodedCallback frame_encoded_callback =
base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
......@@ -140,8 +146,8 @@ TEST_F(AudioReceiverTest, GetOnePacketEncodedframe) {
TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
Configure(true);
EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_)).WillRepeatedly(
testing::Return(true));
EXPECT_CALL(mock_transport_, SendRtcpPacket(testing::_))
.WillRepeatedly(testing::Return(true));
AudioFrameEncodedCallback frame_encoded_callback =
base::Bind(&TestAudioEncoderCallback::DeliverEncodedAudioFrame,
......@@ -149,13 +155,13 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
rtp_header_);
receiver_->IncomingParsedRtpPacket(
payload_.data(), payload_.size(), rtp_header_);
transport::EncodedAudioFrame audio_frame;
base::TimeTicks playout_time;
test_audio_encoder_callback_->SetExpectedResult(
0, testing_clock_->NowTicks());
test_audio_encoder_callback_->SetExpectedResult(0,
testing_clock_->NowTicks());
task_runner_->RunTasks();
EXPECT_EQ(1, test_audio_encoder_callback_->number_times_called());
......@@ -165,8 +171,10 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
uint32 ntp_high;
uint32 ntp_low;
ConvertTimeTicksToNtp(testing_clock_->NowTicks(), &ntp_high, &ntp_low);
rtcp_packet.AddSrWithNtp(audio_config_.feedback_ssrc, ntp_high, ntp_low,
rtp_header_.webrtc.header.timestamp);
rtcp_packet.AddSrWithNtp(audio_config_.feedback_ssrc,
ntp_high,
ntp_low,
rtp_header_.webrtc.header.timestamp);
testing_clock_->Advance(base::TimeDelta::FromMilliseconds(20));
......@@ -179,11 +187,11 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
rtp_header_.is_reference = true;
rtp_header_.reference_frame_id = 0;
rtp_header_.webrtc.header.timestamp = 960;
test_audio_encoder_callback_->SetExpectedResult(2,
testing_clock_->NowTicks() + base::TimeDelta::FromMilliseconds(100));
test_audio_encoder_callback_->SetExpectedResult(
2, testing_clock_->NowTicks() + base::TimeDelta::FromMilliseconds(100));
receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
rtp_header_);
receiver_->IncomingParsedRtpPacket(
payload_.data(), payload_.size(), rtp_header_);
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
task_runner_->RunTasks();
......@@ -198,16 +206,16 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
task_runner_->RunTasks();
EXPECT_EQ(2, test_audio_encoder_callback_->number_times_called());
test_audio_encoder_callback_->SetExpectedResult(
3, testing_clock_->NowTicks());
test_audio_encoder_callback_->SetExpectedResult(3,
testing_clock_->NowTicks());
// Through on one more pending audio frame.
rtp_header_.frame_id = 3;
rtp_header_.is_reference = false;
rtp_header_.reference_frame_id = 0;
rtp_header_.webrtc.header.timestamp = 1280;
receiver_->IncomingParsedRtpPacket(payload_.data(), payload_.size(),
rtp_header_);
receiver_->IncomingParsedRtpPacket(
payload_.data(), payload_.size(), rtp_header_);
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
task_runner_->RunTasks();
......@@ -215,8 +223,7 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
}
// TODO(mikhal): Add encoded frames.
TEST_F(AudioReceiverTest, GetRawFrame) {
}
TEST_F(AudioReceiverTest, GetRawFrame) {}
} // namespace cast
} // namespace media
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment