Commit b9c27ea8 authored by rkc@chromium.org's avatar rkc@chromium.org

Improve audible token detection.

The current code doesn't differentiate between tokens decoded by the DSSS
(inaudible) or DTMF (audible) encoder/decoder. Fix this and send up the
correct medium to the server.

R=kalman@chromium.org, xiyuan@chromium.org
BUG=402334

Review URL: https://codereview.chromium.org/460743004

Cr-Commit-Position: refs/heads/master@{#288872}
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@288872 0039d316-1c4b-4281-b951-d872f2087c98
parent cfa9b0a9
......@@ -71,6 +71,7 @@ class ChromeWhispernetClientTest : public ExtensionBrowserTest {
client->RegisterSamplesCallback(base::Bind(
&ChromeWhispernetClientTest::SamplesCallback, base::Unretained(this)));
expected_token_ = kSixZeros;
expected_audible_ = audible;
client->EncodeToken(kSixZeros, audible);
run_loop_->Run();
......@@ -78,7 +79,7 @@ class ChromeWhispernetClientTest : public ExtensionBrowserTest {
EXPECT_GT(saved_samples_->frames(), 0);
}
void DecodeSamplesAndVerifyToken() {
void DecodeSamplesAndVerifyToken(bool expect_audible) {
copresence::WhispernetClient* client = GetWhispernetClient(context_);
ASSERT_TRUE(client);
......@@ -86,6 +87,7 @@ class ChromeWhispernetClientTest : public ExtensionBrowserTest {
client->RegisterTokensCallback(base::Bind(
&ChromeWhispernetClientTest::TokensCallback, base::Unretained(this)));
expected_token_ = kSixZeros;
expected_audible_ = expect_audible;
ASSERT_GT(saved_samples_->frames(), 0);
......@@ -126,18 +128,21 @@ class ChromeWhispernetClientTest : public ExtensionBrowserTest {
// Callback invoked by the whispernet client when a token has been encoded
// into audio samples. Verifies the token and its medium (audible DTMF vs.
// inaudible DSSS) against the values the test expects, stashes the samples
// for a later decode step, and unblocks the waiting run loop.
void SamplesCallback(
const std::string& token,
bool audible,
const scoped_refptr<media::AudioBusRefCounted>& samples) {
EXPECT_EQ(expected_token_, token);
EXPECT_EQ(expected_audible_, audible);
// Keep the encoded samples so DecodeSamplesAndVerifyToken() can feed them
// back through the decoder.
saved_samples_ = samples;
// The test must have started a run loop before triggering the encode.
ASSERT_TRUE(run_loop_);
run_loop_->Quit();
}
void TokensCallback(const std::vector<std::string>& tokens) {
void TokensCallback(const std::vector<copresence::FullToken>& tokens) {
ASSERT_TRUE(run_loop_);
run_loop_->Quit();
EXPECT_EQ(expected_token_, tokens[0]);
EXPECT_EQ(expected_token_, tokens[0].token);
EXPECT_EQ(expected_audible_, tokens[0].audible);
}
void DetectBroadcastCallback(bool success) {
......@@ -151,6 +156,7 @@ class ChromeWhispernetClientTest : public ExtensionBrowserTest {
content::BrowserContext* context_;
std::string expected_token_;
bool expected_audible_;
scoped_refptr<media::AudioBusRefCounted> saved_samples_;
bool initialized_;
......@@ -170,18 +176,18 @@ IN_PROC_BROWSER_TEST_F(ChromeWhispernetClientTest, EncodeToken) {
IN_PROC_BROWSER_TEST_F(ChromeWhispernetClientTest, DecodeSamples) {
InitializeWhispernet();
EncodeTokenAndSaveSamples(false);
DecodeSamplesAndVerifyToken();
DecodeSamplesAndVerifyToken(false);
}
IN_PROC_BROWSER_TEST_F(ChromeWhispernetClientTest, DetectBroadcast) {
InitializeWhispernet();
EncodeTokenAndSaveSamples(false);
DecodeSamplesAndVerifyToken();
DecodeSamplesAndVerifyToken(false);
DetectBroadcast();
}
IN_PROC_BROWSER_TEST_F(ChromeWhispernetClientTest, Audible) {
InitializeWhispernet();
EncodeTokenAndSaveSamples(true);
DecodeSamplesAndVerifyToken();
DecodeSamplesAndVerifyToken(true);
}
......@@ -32,7 +32,12 @@ ExtensionFunction::ResponseAction CopresencePrivateSendFoundFunction::Run() {
scoped_ptr<api::copresence_private::SendFound::Params> params(
api::copresence_private::SendFound::Params::Create(*args_));
EXTENSION_FUNCTION_VALIDATE(params.get());
GetWhispernetClient()->GetTokensCallback().Run(params->tokens);
std::vector<copresence::FullToken> tokens;
for (size_t i = 0; i < params->tokens.size(); ++i) {
tokens.push_back(copresence::FullToken(params->tokens[i]->token,
params->tokens[i]->audible));
}
GetWhispernetClient()->GetTokensCallback().Run(tokens);
return RespondNow(NoArguments());
}
......@@ -55,7 +60,8 @@ ExtensionFunction::ResponseAction CopresencePrivateSendSamplesFunction::Run() {
string_as_array(&params->samples),
params->samples.size());
GetWhispernetClient()->GetSamplesCallback().Run(params->token, samples);
GetWhispernetClient()->GetSamplesCallback().Run(
params->token.token, params->token.audible, samples);
return RespondNow(NoArguments());
}
......
......@@ -92,7 +92,8 @@ WhisperEncoder.prototype.setAudioDataCallback = function(callback) {
WhisperEncoder.prototype.onNaclMessage_ = function(e) {
var msg = e.data;
if (msg.type == 'encode_token_response') {
this.audioDataCallback_(bytesToBase64(msg.token), msg.samples);
this.audioDataCallback_(
{ token: bytesToBase64(msg.token), audible: msg.audible }, msg.samples);
}
};
......@@ -178,7 +179,7 @@ WhisperDecoder.prototype.onDetectBroadcast = function(callback) {
WhisperDecoder.prototype.onNaclMessage_ = function(e) {
var msg = e.data;
if (msg.type == 'decode_tokens_response') {
this.handleCandidates_(JSON.parse(msg.tokens));
this.handleCandidates_(JSON.parse(msg.tokens), msg.audible);
} else if (msg.type == 'detect_broadcast_response') {
this.detectBroadcastCallback_(msg.detected);
}
......@@ -188,14 +189,19 @@ WhisperDecoder.prototype.onNaclMessage_ = function(e) {
* Method to receive tokens from the decoder and process and forward them to the
* token callback registered with us.
* @param {!Array.string} candidates Array of token candidates.
* @param {boolean} audible Whether the received candidates are from the audible
* decoder or not.
* @private
*/
WhisperDecoder.prototype.handleCandidates_ = function(candidates) {
WhisperDecoder.prototype.handleCandidates_ = function(candidates, audible) {
if (!this.tokenCallback_ || !candidates || candidates.length == 0)
return;
var returnCandidates = [];
for (var i = 0; i < candidates.length; ++i)
returnCandidates[i] = bytesToBase64(candidates[i]);
for (var i = 0; i < candidates.length; ++i) {
returnCandidates[i] = { token: bytesToBase64(candidates[i]),
audible: audible };
}
this.tokenCallback_(returnCandidates);
};
......@@ -25,13 +25,18 @@ namespace copresencePrivate {
RecordParameters record;
};
dictionary Token {
DOMString token;
boolean audible;
};
interface Functions {
// Send a boolean indicating whether our initialization was successful.
static void sendInitialized(boolean success);
// Sends an array of found tokens to Chrome.
static void sendFound(DOMString[] tokens);
static void sendFound(Token[] tokens);
// Send an array buffer of samples encoded for the specified token.
static void sendSamples(DOMString token, ArrayBuffer samples);
static void sendSamples(Token token, ArrayBuffer samples);
// Send a boolean indicating whether we detected a broadcast or not.
static void sendDetect(boolean detected);
};
......
......@@ -47,9 +47,10 @@ class AudioDirectiveHandlerTest : public testing::Test {
protected:
void EncodeToken(const std::string& token,
bool /* audible */,
bool audible,
const AudioDirectiveList::SamplesCallback& callback) {
callback.Run(token, CreateRandomAudioRefCounted(0x1337, 1, 0x7331));
callback.Run(
token, audible, CreateRandomAudioRefCounted(0x1337, 1, 0x7331));
}
copresence::TokenInstruction CreateTransmitInstruction(
......
......@@ -125,6 +125,7 @@ scoped_ptr<AudioDirective> AudioDirectiveList::GetNextFromList(
void AudioDirectiveList::OnTokenEncoded(
const std::string& token,
bool /* audible */,
const scoped_refptr<media::AudioBusRefCounted>& samples) {
// We shouldn't re-encode a token if it's already in the cache.
DCHECK(!samples_cache_.HasKey(token));
......
......@@ -52,8 +52,9 @@ struct AudioDirective {
// classes from it.
class AudioDirectiveList {
public:
typedef base::Callback<
void(const std::string&, const scoped_refptr<media::AudioBusRefCounted>&)>
typedef base::Callback<void(const std::string&,
bool,
const scoped_refptr<media::AudioBusRefCounted>&)>
SamplesCallback;
typedef base::Callback<void(const std::string&, bool, const SamplesCallback&)>
EncodeTokenCallback;
......@@ -78,6 +79,7 @@ class AudioDirectiveList {
// This is the method that the whispernet client needs to call to return
// samples to us.
void OnTokenEncoded(const std::string& token,
bool audible,
const scoped_refptr<media::AudioBusRefCounted>& samples);
private:
......
......@@ -26,9 +26,10 @@ class AudioDirectiveListTest : public testing::Test {
protected:
void EncodeToken(const std::string& token,
bool /* audible */,
bool audible,
const AudioDirectiveList::SamplesCallback& callback) {
callback.Run(token, CreateRandomAudioRefCounted(0x1337, 1, 0x7331));
callback.Run(
token, audible, CreateRandomAudioRefCounted(0x1337, 1, 0x7331));
}
base::MessageLoop message_loop_;
......
......@@ -18,6 +18,13 @@ class AudioBusRefCounted;
namespace copresence {
// A decoded token together with the medium it was received on: audible
// (DTMF encoder/decoder) or inaudible (DSSS). Carrying the medium alongside
// the token string lets RpcHandler report the correct TokenMedium to the
// server instead of assuming ultrasound.
struct FullToken {
FullToken(const std::string& token, bool audible)
: token(token), audible(audible) {}
std::string token;  // The token payload, as produced by the decoder.
bool audible;       // True if decoded by the audible (DTMF) decoder.
};
// The interface that the whispernet client needs to implement. These methods
// provide us the ability to use the audio medium in copresence. Currently since
// the only medium that copresence uses is audio, the implementation of this
......@@ -27,10 +34,11 @@ class WhispernetClient {
// Generic callback to indicate a boolean success or failure.
typedef base::Callback<void(bool)> SuccessCallback;
// Callback that returns detected tokens.
typedef base::Callback<void(const std::vector<std::string>&)> TokensCallback;
typedef base::Callback<void(const std::vector<FullToken>&)> TokensCallback;
// Callback that returns encoded samples for a given token.
typedef base::Callback<
void(const std::string&, const scoped_refptr<media::AudioBusRefCounted>&)>
typedef base::Callback<void(const std::string&,
bool,
const scoped_refptr<media::AudioBusRefCounted>&)>
SamplesCallback;
// Initialize the whispernet client and call the callback when done. The
......
......@@ -262,14 +262,12 @@ void RpcHandler::SendReportRequest(scoped_ptr<ReportRequest> request,
status_callback));
}
void RpcHandler::ReportTokens(TokenMedium medium,
const std::vector<std::string>& tokens) {
DCHECK_EQ(medium, AUDIO_ULTRASOUND_PASSBAND);
void RpcHandler::ReportTokens(const std::vector<FullToken>& tokens) {
DCHECK(!tokens.empty());
scoped_ptr<ReportRequest> request(new ReportRequest);
for (size_t i = 0; i < tokens.size(); ++i) {
const std::string& token = ToUrlSafe(tokens[i]);
const std::string& token = ToUrlSafe(tokens[i].token);
if (invalid_audio_token_cache_.HasKey(token))
continue;
......@@ -280,7 +278,8 @@ void RpcHandler::ReportTokens(TokenMedium medium,
token_observation->set_token_id(token);
TokenSignals* signals = token_observation->add_signals();
signals->set_medium(medium);
signals->set_medium(tokens[i].audible ? AUDIO_AUDIBLE_DTMF
: AUDIO_ULTRASOUND_PASSBAND);
signals->set_observed_time_millis(base::Time::Now().ToJsTime());
}
SendReportRequest(request.Pass());
......@@ -300,8 +299,7 @@ void RpcHandler::ConnectToWhispernet() {
whispernet_client->RegisterTokensCallback(
base::Bind(&RpcHandler::ReportTokens,
// On destruction, this callback will be disconnected.
base::Unretained(this),
AUDIO_ULTRASOUND_PASSBAND));
base::Unretained(this)));
}
// Private methods
......
......@@ -50,7 +50,7 @@ class RpcHandler {
const StatusCallback& callback);
// Report a set of tokens to the server for a given medium.
void ReportTokens(TokenMedium medium, const std::vector<std::string>& tokens);
void ReportTokens(const std::vector<FullToken>& tokens);
// Create the directive handler and connect it to
// the whispernet client specified by the delegate.
......
......@@ -266,13 +266,13 @@ TEST_F(RpcHandlerTest, MAYBE_CreateRequestHeader) {
#define MAYBE_ReportTokens DISABLED_ReportTokens
TEST_F(RpcHandlerTest, MAYBE_ReportTokens) {
std::vector<std::string> test_tokens;
test_tokens.push_back("token 1");
test_tokens.push_back("token 2");
test_tokens.push_back("token 3");
std::vector<FullToken> test_tokens;
test_tokens.push_back(FullToken("token 1", false));
test_tokens.push_back(FullToken("token 2", true));
test_tokens.push_back(FullToken("token 3", false));
AddInvalidToken("token 2");
rpc_handler_.ReportTokens(AUDIO_ULTRASOUND_PASSBAND, test_tokens);
rpc_handler_.ReportTokens(test_tokens);
EXPECT_EQ(RpcHandler::kReportRequestRpcName, rpc_name_);
ReportRequest* report = static_cast<ReportRequest*>(request_proto_.get());
google::protobuf::RepeatedPtrField<TokenObservation> tokens_sent =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment