Commit 5394e420 authored by erg@google.com

Reorder the methods in net/url_request/.

BUG=68682
TEST=compiles

Review URL: http://codereview.chromium.org/6382003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@72013 0039d316-1c4b-4281-b951-d872f2087c98
parent 378a8436
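For context, the convention this change presumably applies (the commit message only says "Reorder the methods") is that method definitions in a .cc file follow the declaration order of the corresponding header. A minimal sketch with a hypothetical class, not taken from this CL:

```cpp
// widget.h -- declaration order is the source of truth.
class Widget {
 public:
  Widget();
  void Open();
  void Close();

 private:
  void Reset();
};

// widget.cc -- definitions appear in the same order as the header declares them.
Widget::Widget() {}

void Widget::Open() {}

void Widget::Close() {}

void Widget::Reset() {}
```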
......@@ -44,6 +44,44 @@ const SOCKET ListenSocket::kInvalidSocket = -1;
const int ListenSocket::kSocketError = -1;
#endif
ListenSocket* ListenSocket::Listen(std::string ip, int port,
ListenSocketDelegate* del) {
SOCKET s = Listen(ip, port);
if (s == kInvalidSocket) {
// TODO(erikkay): error handling
} else {
ListenSocket* sock = new ListenSocket(s, del);
sock->Listen();
return sock;
}
return NULL;
}
void ListenSocket::Send(const char* bytes, int len, bool append_linefeed) {
SendInternal(bytes, len);
if (append_linefeed) {
SendInternal("\r\n", 2);
}
}
void ListenSocket::Send(const std::string& str, bool append_linefeed) {
Send(str.data(), static_cast<int>(str.length()), append_linefeed);
}
void ListenSocket::PauseReads() {
DCHECK(!reads_paused_);
reads_paused_ = true;
}
void ListenSocket::ResumeReads() {
DCHECK(reads_paused_);
reads_paused_ = false;
if (has_pending_reads_) {
has_pending_reads_ = false;
Read();
}
}
ListenSocket::ListenSocket(SOCKET s, ListenSocketDelegate *del)
: socket_(s),
socket_delegate_(del),
......@@ -86,17 +124,45 @@ SOCKET ListenSocket::Listen(std::string ip, int port) {
return s;
}
ListenSocket* ListenSocket::Listen(std::string ip, int port,
ListenSocketDelegate* del) {
SOCKET s = Listen(ip, port);
if (s == kInvalidSocket) {
// TODO(erikkay): error handling
} else {
ListenSocket* sock = new ListenSocket(s, del);
sock->Listen();
return sock;
SOCKET ListenSocket::Accept(SOCKET s) {
sockaddr_in from;
socklen_t from_len = sizeof(from);
SOCKET conn =
HANDLE_EINTR(accept(s, reinterpret_cast<sockaddr*>(&from), &from_len));
if (conn != kInvalidSocket) {
net::SetNonBlocking(conn);
}
return conn;
}
void ListenSocket::SendInternal(const char* bytes, int len) {
char* send_buf = const_cast<char *>(bytes);
int len_left = len;
while (true) {
int sent = HANDLE_EINTR(send(socket_, send_buf, len_left, 0));
if (sent == len_left) { // A shortcut to avoid extraneous checks.
break;
}
if (sent == kSocketError) {
#if defined(OS_WIN)
if (WSAGetLastError() != WSAEWOULDBLOCK) {
LOG(ERROR) << "send failed: WSAGetLastError()==" << WSAGetLastError();
#elif defined(OS_POSIX)
if (errno != EWOULDBLOCK && errno != EAGAIN) {
LOG(ERROR) << "send failed: errno==" << errno;
#endif
break;
}
// Otherwise we would block, and now we have to wait for a retry.
// Fall through to PlatformThread::YieldCurrentThread()
} else {
// sent != len_left according to the shortcut above.
// Shift the buffer start and send the remainder after a short while.
send_buf += sent;
len_left -= sent;
}
base::PlatformThread::YieldCurrentThread();
}
return NULL;
}
void ListenSocket::Listen() {
......@@ -108,17 +174,6 @@ void ListenSocket::Listen() {
#endif
}
SOCKET ListenSocket::Accept(SOCKET s) {
sockaddr_in from;
socklen_t from_len = sizeof(from);
SOCKET conn =
HANDLE_EINTR(accept(s, reinterpret_cast<sockaddr*>(&from), &from_len));
if (conn != kInvalidSocket) {
net::SetNonBlocking(conn);
}
return conn;
}
void ListenSocket::Accept() {
SOCKET conn = Accept(socket_);
if (conn != kInvalidSocket) {
......@@ -166,17 +221,6 @@ void ListenSocket::Read() {
} while (len == kReadBufSize);
}
void ListenSocket::CloseSocket(SOCKET s) {
if (s && s != kInvalidSocket) {
UnwatchSocket();
#if defined(OS_WIN)
closesocket(s);
#elif defined(OS_POSIX)
close(s);
#endif
}
}
void ListenSocket::Close() {
#if defined(OS_POSIX)
if (wait_state_ == WAITING_CLOSE)
......@@ -186,12 +230,15 @@ void ListenSocket::Close() {
socket_delegate_->DidClose(this);
}
void ListenSocket::UnwatchSocket() {
void ListenSocket::CloseSocket(SOCKET s) {
if (s && s != kInvalidSocket) {
UnwatchSocket();
#if defined(OS_WIN)
watcher_.StopWatching();
closesocket(s);
#elif defined(OS_POSIX)
watcher_.StopWatchingFileDescriptor();
close(s);
#endif
}
}
void ListenSocket::WatchSocket(WaitState state) {
......@@ -206,59 +253,12 @@ void ListenSocket::WatchSocket(WaitState state) {
#endif
}
void ListenSocket::SendInternal(const char* bytes, int len) {
char* send_buf = const_cast<char *>(bytes);
int len_left = len;
while (true) {
int sent = HANDLE_EINTR(send(socket_, send_buf, len_left, 0));
if (sent == len_left) { // A shortcut to avoid extraneous checks.
break;
}
if (sent == kSocketError) {
void ListenSocket::UnwatchSocket() {
#if defined(OS_WIN)
if (WSAGetLastError() != WSAEWOULDBLOCK) {
LOG(ERROR) << "send failed: WSAGetLastError()==" << WSAGetLastError();
watcher_.StopWatching();
#elif defined(OS_POSIX)
if (errno != EWOULDBLOCK && errno != EAGAIN) {
LOG(ERROR) << "send failed: errno==" << errno;
watcher_.StopWatchingFileDescriptor();
#endif
break;
}
// Otherwise we would block, and now we have to wait for a retry.
// Fall through to PlatformThread::YieldCurrentThread()
} else {
// sent != len_left according to the shortcut above.
// Shift the buffer start and send the remainder after a short while.
send_buf += sent;
len_left -= sent;
}
base::PlatformThread::YieldCurrentThread();
}
}
void ListenSocket::Send(const char* bytes, int len, bool append_linefeed) {
SendInternal(bytes, len);
if (append_linefeed) {
SendInternal("\r\n", 2);
}
}
void ListenSocket::Send(const std::string& str, bool append_linefeed) {
Send(str.data(), static_cast<int>(str.length()), append_linefeed);
}
void ListenSocket::PauseReads() {
DCHECK(!reads_paused_);
reads_paused_ = true;
}
void ListenSocket::ResumeReads() {
DCHECK(reads_paused_);
reads_paused_ = false;
if (has_pending_reads_) {
has_pending_reads_ = false;
Read();
}
}
// TODO(ibrar): We can add these functions into OS dependent files
......
......@@ -76,6 +76,13 @@ class ListenSocket : public base::RefCountedThreadSafe<ListenSocket>,
protected:
friend class base::RefCountedThreadSafe<ListenSocket>;
enum WaitState {
NOT_WAITING = 0,
WAITING_ACCEPT = 1,
WAITING_READ = 3,
WAITING_CLOSE = 4
};
static const SOCKET kInvalidSocket;
static const int kSocketError;
......@@ -93,12 +100,6 @@ class ListenSocket : public base::RefCountedThreadSafe<ListenSocket>,
virtual void Close();
virtual void CloseSocket(SOCKET s);
enum WaitState {
NOT_WAITING = 0,
WAITING_ACCEPT = 1,
WAITING_READ = 3,
WAITING_CLOSE = 4
};
// Pass any value in case of Windows, because in Windows
// we are not using state.
void WatchSocket(WaitState state);
......@@ -110,12 +111,12 @@ class ListenSocket : public base::RefCountedThreadSafe<ListenSocket>,
base::win::ObjectWatcher watcher_;
HANDLE socket_event_;
#elif defined(OS_POSIX)
WaitState wait_state_;
// The socket's libevent wrapper
MessageLoopForIO::FileDescriptorWatcher watcher_;
// Called by MessagePumpLibevent when the socket is ready to do I/O
virtual void OnFileCanReadWithoutBlocking(int fd);
virtual void OnFileCanWriteWithoutBlocking(int fd);
WaitState wait_state_;
// The socket's libevent wrapper
MessageLoopForIO::FileDescriptorWatcher watcher_;
#endif
SOCKET socket_;
......
......@@ -10,12 +10,6 @@
namespace net {
HTTPSProber::HTTPSProber() {
}
HTTPSProber::~HTTPSProber() {
}
// static
HTTPSProber* HTTPSProber::GetInstance() {
return Singleton<HTTPSProber>::get();
......@@ -46,26 +40,6 @@ bool HTTPSProber::ProbeHost(const std::string& host, URLRequestContext* ctx,
return true;
}
void HTTPSProber::Success(net::URLRequest* request) {
DoCallback(request, true);
}
void HTTPSProber::Failure(net::URLRequest* request) {
DoCallback(request, false);
}
void HTTPSProber::DoCallback(net::URLRequest* request, bool result) {
std::map<std::string, HTTPSProberDelegate*>::iterator i =
inflight_probes_.find(request->original_url().host());
DCHECK(i != inflight_probes_.end());
HTTPSProberDelegate* delegate = i->second;
inflight_probes_.erase(i);
probed_.insert(request->original_url().host());
delete request;
delegate->ProbeComplete(result);
}
void HTTPSProber::OnAuthRequired(net::URLRequest* request,
net::AuthChallengeInfo* auth_info) {
Success(request);
......@@ -89,4 +63,30 @@ void HTTPSProber::OnReadCompleted(net::URLRequest* request, int bytes_read) {
NOTREACHED();
}
HTTPSProber::HTTPSProber() {
}
HTTPSProber::~HTTPSProber() {
}
void HTTPSProber::Success(net::URLRequest* request) {
DoCallback(request, true);
}
void HTTPSProber::Failure(net::URLRequest* request) {
DoCallback(request, false);
}
void HTTPSProber::DoCallback(net::URLRequest* request, bool result) {
std::map<std::string, HTTPSProberDelegate*>::iterator i =
inflight_probes_.find(request->original_url().host());
DCHECK(i != inflight_probes_.end());
HTTPSProberDelegate* delegate = i->second;
inflight_probes_.erase(i);
probed_.insert(request->original_url().host());
delete request;
delegate->ProbeComplete(result);
}
} // namespace net
......@@ -63,6 +63,8 @@ class HTTPSProber : public net::URLRequest::Delegate {
virtual void OnReadCompleted(net::URLRequest* request, int bytes_read);
private:
friend struct DefaultSingletonTraits<HTTPSProber>;
HTTPSProber();
~HTTPSProber();
......@@ -73,7 +75,6 @@ class HTTPSProber : public net::URLRequest::Delegate {
std::map<std::string, HTTPSProberDelegate*> inflight_probes_;
std::set<std::string> probed_;
friend struct DefaultSingletonTraits<HTTPSProber>;
DISALLOW_COPY_AND_ASSIGN(HTTPSProber);
};
......
......@@ -58,6 +58,21 @@ class X509Certificate;
//
class URLRequest : public base::NonThreadSafe {
public:
// Callback function implemented by protocol handlers to create new jobs.
// The factory may return NULL to indicate an error, which will cause other
// factories to be queried. If no factory handles the request, then the
// default job will be used.
typedef URLRequestJob* (ProtocolFactory)(URLRequest* request,
const std::string& scheme);
// HTTP request/response header IDs (via some preprocessor fun) for use with
// SetRequestHeaderById and GetResponseHeaderById.
enum {
#define HTTP_ATOM(x) HTTP_ ## x,
#include "net/http/http_atom_list.h"
#undef HTTP_ATOM
};
// Derive from this class and add your own data members to associate extra
// information with a URLRequest. Use GetUserData(key) and SetUserData()
class UserData {
......@@ -66,13 +81,6 @@ class URLRequest : public base::NonThreadSafe {
virtual ~UserData() {}
};
// Callback function implemented by protocol handlers to create new jobs.
// The factory may return NULL to indicate an error, which will cause other
// factories to be queried. If no factory handles the request, then the
// default job will be used.
typedef URLRequestJob* (ProtocolFactory)(URLRequest* request,
const std::string& scheme);
// This class handles network interception. Use with
// (Un)RegisterRequestInterceptor.
class Interceptor {
......@@ -509,14 +517,6 @@ class URLRequest : public base::NonThreadSafe {
// cancel the request instead, call Cancel().
void ContinueDespiteLastError();
// HTTP request/response header IDs (via some preprocessor fun) for use with
// SetRequestHeaderById and GetResponseHeaderById.
enum {
#define HTTP_ATOM(x) HTTP_ ## x,
#include "net/http/http_atom_list.h"
#undef HTTP_ATOM
};
// Returns true if performance profiling should be enabled on the
// URLRequestJob serving this request.
bool enable_profiling() const { return enable_profiling_; }
......@@ -568,6 +568,7 @@ class URLRequest : public base::NonThreadSafe {
private:
friend class URLRequestJob;
typedef std::map<const void*, linked_ptr<UserData> > UserDataMap;
void StartJob(URLRequestJob* job);
......@@ -620,7 +621,6 @@ class URLRequest : public base::NonThreadSafe {
bool is_pending_;
// Externally-defined data accessible by key
typedef std::map<const void*, linked_ptr<UserData> > UserDataMap;
UserDataMap user_data_;
// Whether to enable performance profiling on the job serving this request.
......
......@@ -10,16 +10,16 @@
namespace net {
URLRequestDataJob::URLRequestDataJob(URLRequest* request)
: URLRequestSimpleJob(request) {
}
// static
URLRequestJob* URLRequestDataJob::Factory(URLRequest* request,
const std::string& scheme) {
return new URLRequestDataJob(request);
}
URLRequestDataJob::URLRequestDataJob(URLRequest* request)
: URLRequestSimpleJob(request) {
}
bool URLRequestDataJob::GetData(std::string* mime_type,
std::string* charset,
std::string* data) const {
......
......@@ -19,12 +19,13 @@ class URLRequestDataJob : public URLRequestSimpleJob {
public:
explicit URLRequestDataJob(URLRequest* request);
static URLRequest::ProtocolFactory Factory;
// URLRequestSimpleJob
virtual bool GetData(std::string* mime_type,
std::string* charset,
std::string* data) const;
static URLRequest::ProtocolFactory Factory;
private:
~URLRequestDataJob();
......
......@@ -12,12 +12,7 @@ namespace net {
URLRequestFilter* URLRequestFilter::shared_instance_ = NULL;
// static
URLRequestFilter* URLRequestFilter::GetInstance() {
if (!shared_instance_)
shared_instance_ = new URLRequestFilter;
return shared_instance_;
}
URLRequestFilter::~URLRequestFilter() {}
// static
net::URLRequestJob* URLRequestFilter::Factory(net::URLRequest* request,
......@@ -26,7 +21,12 @@ net::URLRequestJob* URLRequestFilter::Factory(net::URLRequest* request,
return GetInstance()->FindRequestHandler(request, scheme);
}
URLRequestFilter::~URLRequestFilter() {}
// static
URLRequestFilter* URLRequestFilter::GetInstance() {
if (!shared_instance_)
shared_instance_ = new URLRequestFilter;
return shared_instance_;
}
void URLRequestFilter::AddHostnameHandler(const std::string& scheme,
const std::string& hostname, net::URLRequest::ProtocolFactory* factory) {
......
......@@ -39,12 +39,12 @@ class URLRequestFilter {
typedef base::hash_map<std::string, net::URLRequest::ProtocolFactory*>
UrlHandlerMap;
// Singleton instance for use.
static URLRequestFilter* GetInstance();
~URLRequestFilter();
static net::URLRequest::ProtocolFactory Factory;
~URLRequestFilter();
// Singleton instance for use.
static URLRequestFilter* GetInstance();
void AddHostnameHandler(const std::string& scheme,
const std::string& hostname,
......
......@@ -29,9 +29,6 @@ URLRequestFtpJob::URLRequestFtpJob(URLRequest* request)
ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
}
URLRequestFtpJob::~URLRequestFtpJob() {
}
// static
URLRequestJob* URLRequestFtpJob::Factory(URLRequest* request,
const std::string& scheme) {
......@@ -55,6 +52,105 @@ bool URLRequestFtpJob::GetMimeType(std::string* mime_type) const {
return false;
}
URLRequestFtpJob::~URLRequestFtpJob() {
}
void URLRequestFtpJob::StartTransaction() {
// Create a transaction.
DCHECK(!transaction_.get());
DCHECK(request_->context());
DCHECK(request_->context()->ftp_transaction_factory());
transaction_.reset(
request_->context()->ftp_transaction_factory()->CreateTransaction());
// No matter what, we want to report our status as IO pending since we will
// be notifying our consumer asynchronously via OnStartCompleted.
SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
int rv;
if (transaction_.get()) {
rv = transaction_->Start(
&request_info_, &start_callback_, request_->net_log());
if (rv == ERR_IO_PENDING)
return;
} else {
rv = ERR_FAILED;
}
// The transaction started synchronously, but we need to notify the
// URLRequest delegate via the message loop.
MessageLoop::current()->PostTask(
FROM_HERE,
method_factory_.NewRunnableMethod(
&URLRequestFtpJob::OnStartCompleted, rv));
}
void URLRequestFtpJob::OnStartCompleted(int result) {
// Clear the IO_PENDING status
SetStatus(URLRequestStatus());
// FTP obviously doesn't have HTTP Content-Length header. We have to pass
// the content size information manually.
set_expected_content_size(
transaction_->GetResponseInfo()->expected_content_size);
if (result == OK) {
NotifyHeadersComplete();
} else if (transaction_->GetResponseInfo()->needs_auth) {
GURL origin = request_->url().GetOrigin();
if (server_auth_ && server_auth_->state == AUTH_STATE_HAVE_AUTH) {
request_->context()->ftp_auth_cache()->Remove(origin,
server_auth_->username,
server_auth_->password);
} else if (!server_auth_) {
server_auth_ = new AuthData();
}
server_auth_->state = AUTH_STATE_NEED_AUTH;
FtpAuthCache::Entry* cached_auth =
request_->context()->ftp_auth_cache()->Lookup(origin);
if (cached_auth) {
// Retry using cached auth data.
SetAuth(cached_auth->username, cached_auth->password);
} else {
// Prompt for a username/password.
NotifyHeadersComplete();
}
} else {
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
}
}
void URLRequestFtpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
if (result == 0) {
NotifyDone(URLRequestStatus());
} else if (result < 0) {
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
} else {
// Clear the IO_PENDING status
SetStatus(URLRequestStatus());
}
NotifyReadComplete(result);
}
void URLRequestFtpJob::RestartTransactionWithAuth() {
DCHECK(server_auth_ && server_auth_->state == AUTH_STATE_HAVE_AUTH);
// No matter what, we want to report our status as IO pending since we will
// be notifying our consumer asynchronously via OnStartCompleted.
SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
int rv = transaction_->RestartWithAuth(server_auth_->username,
server_auth_->password,
&start_callback_);
if (rv == ERR_IO_PENDING)
return;
MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
this, &URLRequestFtpJob::OnStartCompleted, rv));
}
void URLRequestFtpJob::Start() {
DCHECK(!transaction_.get());
request_info_.url = request_->url();
......@@ -147,100 +243,4 @@ bool URLRequestFtpJob::ReadRawData(IOBuffer* buf,
return false;
}
void URLRequestFtpJob::OnStartCompleted(int result) {
// Clear the IO_PENDING status
SetStatus(URLRequestStatus());
// FTP obviously doesn't have HTTP Content-Length header. We have to pass
// the content size information manually.
set_expected_content_size(
transaction_->GetResponseInfo()->expected_content_size);
if (result == OK) {
NotifyHeadersComplete();
} else if (transaction_->GetResponseInfo()->needs_auth) {
GURL origin = request_->url().GetOrigin();
if (server_auth_ && server_auth_->state == AUTH_STATE_HAVE_AUTH) {
request_->context()->ftp_auth_cache()->Remove(origin,
server_auth_->username,
server_auth_->password);
} else if (!server_auth_) {
server_auth_ = new AuthData();
}
server_auth_->state = AUTH_STATE_NEED_AUTH;
FtpAuthCache::Entry* cached_auth =
request_->context()->ftp_auth_cache()->Lookup(origin);
if (cached_auth) {
// Retry using cached auth data.
SetAuth(cached_auth->username, cached_auth->password);
} else {
// Prompt for a username/password.
NotifyHeadersComplete();
}
} else {
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
}
}
void URLRequestFtpJob::OnReadCompleted(int result) {
read_in_progress_ = false;
if (result == 0) {
NotifyDone(URLRequestStatus());
} else if (result < 0) {
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
} else {
// Clear the IO_PENDING status
SetStatus(URLRequestStatus());
}
NotifyReadComplete(result);
}
void URLRequestFtpJob::RestartTransactionWithAuth() {
DCHECK(server_auth_ && server_auth_->state == AUTH_STATE_HAVE_AUTH);
// No matter what, we want to report our status as IO pending since we will
// be notifying our consumer asynchronously via OnStartCompleted.
SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
int rv = transaction_->RestartWithAuth(server_auth_->username,
server_auth_->password,
&start_callback_);
if (rv == ERR_IO_PENDING)
return;
MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
this, &URLRequestFtpJob::OnStartCompleted, rv));
}
void URLRequestFtpJob::StartTransaction() {
// Create a transaction.
DCHECK(!transaction_.get());
DCHECK(request_->context());
DCHECK(request_->context()->ftp_transaction_factory());
transaction_.reset(
request_->context()->ftp_transaction_factory()->CreateTransaction());
// No matter what, we want to report our status as IO pending since we will
// be notifying our consumer asynchronously via OnStartCompleted.
SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
int rv;
if (transaction_.get()) {
rv = transaction_->Start(
&request_info_, &start_callback_, request_->net_log());
if (rv == ERR_IO_PENDING)
return;
} else {
rv = ERR_FAILED;
}
// The transaction started synchronously, but we need to notify the
// URLRequest delegate via the message loop.
MessageLoop::current()->PostTask(
FROM_HERE,
method_factory_.NewRunnableMethod(
&URLRequestFtpJob::OnStartCompleted, rv));
}
} // namespace net
......@@ -24,7 +24,6 @@ class URLRequestContext;
// provides an implementation for FTP.
class URLRequestFtpJob : public URLRequestJob {
public:
explicit URLRequestFtpJob(URLRequest* request);
static URLRequestJob* Factory(URLRequest* request,
......@@ -36,6 +35,15 @@ class URLRequestFtpJob : public URLRequestJob {
private:
virtual ~URLRequestFtpJob();
void StartTransaction();
void OnStartCompleted(int result);
void OnReadCompleted(int result);
void RestartTransactionWithAuth();
void LogFtpServerType(char server_type);
// Overridden from URLRequestJob:
virtual void Start();
virtual void Kill();
......@@ -51,15 +59,6 @@ class URLRequestFtpJob : public URLRequestJob {
virtual uint64 GetUploadProgress() const;
virtual bool ReadRawData(IOBuffer* buf, int buf_size, int *bytes_read);
void StartTransaction();
void OnStartCompleted(int result);
void OnReadCompleted(int result);
void RestartTransactionWithAuth();
void LogFtpServerType(char server_type);
FtpRequestInfo request_info_;
scoped_ptr<FtpTransaction> transaction_;
......
......@@ -34,6 +34,31 @@ class URLRequestHttpJob : public URLRequestJob {
protected:
explicit URLRequestHttpJob(URLRequest* request);
// Shadows URLRequestJob's version of this method so we can grab cookies.
void NotifyHeadersComplete();
void DestroyTransaction();
void StartTransaction();
void AddExtraHeaders();
void AddCookieHeaderAndStart();
void SaveCookiesAndNotifyHeadersComplete();
void SaveNextCookie();
void FetchResponseCookies(const HttpResponseInfo* response_info,
std::vector<std::string>* cookies);
// Process the Strict-Transport-Security header, if one exists.
void ProcessStrictTransportSecurityHeader();
void OnCanGetCookiesCompleted(int result);
void OnCanSetCookieCompleted(int result);
void OnStartCompleted(int result);
void OnReadCompleted(int result);
bool ShouldTreatAsCertificateError(int result);
void RestartTransactionWithAuth(const string16& username,
const string16& password);
// Overridden from URLRequestJob:
virtual void SetUpload(UploadData* upload);
virtual void SetExtraRequestHeaders(const HttpRequestHeaders& headers);
......@@ -61,31 +86,6 @@ class URLRequestHttpJob : public URLRequestJob {
virtual bool ReadRawData(IOBuffer* buf, int buf_size, int *bytes_read);
virtual void StopCaching();
// Shadows URLRequestJob's version of this method so we can grab cookies.
void NotifyHeadersComplete();
void DestroyTransaction();
void StartTransaction();
void AddExtraHeaders();
void AddCookieHeaderAndStart();
void SaveCookiesAndNotifyHeadersComplete();
void SaveNextCookie();
void FetchResponseCookies(const HttpResponseInfo* response_info,
std::vector<std::string>* cookies);
// Process the Strict-Transport-Security header, if one exists.
void ProcessStrictTransportSecurityHeader();
void OnCanGetCookiesCompleted(int result);
void OnCanSetCookieCompleted(int result);
void OnStartCompleted(int result);
void OnReadCompleted(int result);
bool ShouldTreatAsCertificateError(int result);
void RestartTransactionWithAuth(const string16& username,
const string16& password);
// Keep a reference to the url request context to be sure it's not deleted
// before us.
scoped_refptr<URLRequestContext> context_;
......
......@@ -129,13 +129,6 @@ class URLRequestJob : public base::RefCounted<URLRequestJob>,
virtual bool GetContentEncodings(
std::vector<Filter::FilterType>* encoding_types);
// Find out if this is a download.
virtual bool IsDownload() const;
// Find out if this is a response to a request that advertised an SDCH
// dictionary. Only makes sense for some types of requests.
virtual bool IsSdchResponse() const;
// Called to setup stream filter for this request. An example of filter is
// content encoding/decoding.
void SetupFilter();
......@@ -208,6 +201,8 @@ class URLRequestJob : public base::RefCounted<URLRequestJob>,
virtual bool GetMimeType(std::string* mime_type) const;
virtual bool GetURL(GURL* gurl) const;
virtual base::Time GetRequestTime() const;
virtual bool IsDownload() const;
virtual bool IsSdchResponse() const;
virtual bool IsCachedContent() const;
virtual int64 GetByteReadCount() const;
virtual int GetResponseCode() const;
......@@ -277,16 +272,16 @@ class URLRequestJob : public base::RefCounted<URLRequestJob>,
// to get SDCH to emit stats.
void DestroyFilters() { filter_.reset(); }
// The request that initiated this job. This value MAY BE NULL if the
// request was released by DetachRequest().
net::URLRequest* request_;
// The status of the job.
const net::URLRequestStatus GetStatus();
// Set the status of the job.
void SetStatus(const net::URLRequestStatus& status);
// The request that initiated this job. This value MAY BE NULL if the
// request was released by DetachRequest().
net::URLRequest* request_;
// Whether the job is doing performance profiling
bool is_profiling_;
......
......@@ -32,6 +32,8 @@ class URLRequestJobTracker {
// The observer's methods are called on the thread that called AddObserver.
class JobObserver {
public:
virtual ~JobObserver() {}
// Called after the given job has been added to the list
virtual void OnJobAdded(URLRequestJob* job) = 0;
......@@ -54,8 +56,6 @@ class URLRequestJobTracker {
// duration of the OnBytesRead callback.
virtual void OnBytesRead(URLRequestJob* job, const char* buf,
int byte_count) = 0;
virtual ~JobObserver() {}
};
URLRequestJobTracker();
......
......@@ -65,17 +65,36 @@ URLRequestThrottlerEntry::URLRequestThrottlerEntry(
Initialize();
}
URLRequestThrottlerEntry::~URLRequestThrottlerEntry() {
}
bool URLRequestThrottlerEntry::IsEntryOutdated() const {
if (entry_lifetime_ms_ == -1)
return false;
void URLRequestThrottlerEntry::Initialize() {
// Since this method is called by the constructors, GetTimeNow() (a virtual
// method) is not used.
exponential_backoff_release_time_ = base::TimeTicks::Now();
failure_count_ = 0;
latest_response_was_failure_ = false;
base::TimeTicks now = GetTimeNow();
sliding_window_release_time_ = base::TimeTicks::Now();
// If there are send events in the sliding window period, we still need this
// entry.
if (send_log_.size() > 0 &&
send_log_.back() + sliding_window_period_ > now) {
return false;
}
int64 unused_since_ms =
(now - exponential_backoff_release_time_).InMilliseconds();
// Release time is further than now, we are managing it.
if (unused_since_ms < 0)
return false;
// latest_response_was_failure_ is true indicates that the latest one or
// more requests encountered server errors or had malformed response bodies.
// In that case, we don't want to collect the entry unless it hasn't been used
// for longer than the maximum allowed back-off.
if (latest_response_was_failure_)
return unused_since_ms > std::max(maximum_backoff_ms_, entry_lifetime_ms_);
// Otherwise, consider the entry is outdated if it hasn't been used for the
// specified lifetime period.
return unused_since_ms > entry_lifetime_ms_;
}
bool URLRequestThrottlerEntry::IsDuringExponentialBackoff() const {
......@@ -153,38 +172,6 @@ void URLRequestThrottlerEntry::UpdateWithResponse(
}
}
bool URLRequestThrottlerEntry::IsEntryOutdated() const {
if (entry_lifetime_ms_ == -1)
return false;
base::TimeTicks now = GetTimeNow();
// If there are send events in the sliding window period, we still need this
// entry.
if (send_log_.size() > 0 &&
send_log_.back() + sliding_window_period_ > now) {
return false;
}
int64 unused_since_ms =
(now - exponential_backoff_release_time_).InMilliseconds();
// Release time is further than now, we are managing it.
if (unused_since_ms < 0)
return false;
// latest_response_was_failure_ is true indicates that the latest one or
// more requests encountered server errors or had malformed response bodies.
// In that case, we don't want to collect the entry unless it hasn't been used
// for longer than the maximum allowed back-off.
if (latest_response_was_failure_)
return unused_since_ms > std::max(maximum_backoff_ms_, entry_lifetime_ms_);
// Otherwise, consider the entry is outdated if it hasn't been used for the
// specified lifetime period.
return unused_since_ms > entry_lifetime_ms_;
}
void URLRequestThrottlerEntry::ReceivedContentWasMalformed() {
// For any response that is marked as malformed now, we have probably
// considered it as a success when receiving it and decreased the failure
......@@ -199,6 +186,19 @@ void URLRequestThrottlerEntry::ReceivedContentWasMalformed() {
exponential_backoff_release_time_ = CalculateExponentialBackoffReleaseTime();
}
URLRequestThrottlerEntry::~URLRequestThrottlerEntry() {
}
void URLRequestThrottlerEntry::Initialize() {
// Since this method is called by the constructors, GetTimeNow() (a virtual
// method) is not used.
exponential_backoff_release_time_ = base::TimeTicks::Now();
failure_count_ = 0;
latest_response_was_failure_ = false;
sliding_window_release_time_ = base::TimeTicks::Now();
}
base::TimeTicks
URLRequestThrottlerEntry::CalculateExponentialBackoffReleaseTime() {
double delay = initial_backoff_ms_;
......
......@@ -68,6 +68,10 @@ class URLRequestThrottlerEntry : public URLRequestThrottlerEntryInterface {
double jitter_factor,
int maximum_backoff_ms);
// Used by the manager, returns true if the entry needs to be garbage
// collected.
bool IsEntryOutdated() const;
// Implementation of URLRequestThrottlerEntryInterface.
virtual bool IsDuringExponentialBackoff() const;
virtual int64 ReserveSendingTimeForNextRequest(
......@@ -77,10 +81,6 @@ class URLRequestThrottlerEntry : public URLRequestThrottlerEntryInterface {
const URLRequestThrottlerHeaderInterface* response);
virtual void ReceivedContentWasMalformed();
// Used by the manager, returns true if the entry needs to be garbage
// collected.
bool IsEntryOutdated() const;
protected:
virtual ~URLRequestThrottlerEntry();
......
......@@ -31,6 +31,27 @@ scoped_refptr<URLRequestThrottlerEntryInterface>
return entry;
}
void URLRequestThrottlerManager::OverrideEntryForTests(
const GURL& url,
URLRequestThrottlerEntry* entry) {
if (entry == NULL)
return;
// Normalize the url.
std::string url_id = GetIdFromUrl(url);
// Periodically garbage collect old entries.
GarbageCollectEntriesIfNecessary();
url_entries_[url_id] = entry;
}
void URLRequestThrottlerManager::EraseEntryForTests(const GURL& url) {
// Normalize the url.
std::string url_id = GetIdFromUrl(url);
url_entries_.erase(url_id);
}
URLRequestThrottlerManager::URLRequestThrottlerManager()
: requests_since_last_gc_(0),
enforce_throttling_(true) {
......@@ -58,6 +79,15 @@ std::string URLRequestThrottlerManager::GetIdFromUrl(const GURL& url) const {
return StringToLowerASCII(id.spec());
}
void URLRequestThrottlerManager::GarbageCollectEntriesIfNecessary() {
requests_since_last_gc_++;
if (requests_since_last_gc_ < kRequestsBetweenCollecting)
return;
requests_since_last_gc_ = 0;
GarbageCollectEntries();
}
void URLRequestThrottlerManager::GarbageCollectEntries() {
UrlEntryMap::iterator i = url_entries_.begin();
......@@ -75,34 +105,4 @@ void URLRequestThrottlerManager::GarbageCollectEntries() {
}
}
void URLRequestThrottlerManager::GarbageCollectEntriesIfNecessary() {
requests_since_last_gc_++;
if (requests_since_last_gc_ < kRequestsBetweenCollecting)
return;
requests_since_last_gc_ = 0;
GarbageCollectEntries();
}
void URLRequestThrottlerManager::OverrideEntryForTests(
const GURL& url,
URLRequestThrottlerEntry* entry) {
if (entry == NULL)
return;
// Normalize the url.
std::string url_id = GetIdFromUrl(url);
// Periodically garbage collect old entries.
GarbageCollectEntriesIfNecessary();
url_entries_[url_id] = entry;
}
void URLRequestThrottlerManager::EraseEntryForTests(const GURL& url) {
// Normalize the url.
std::string url_id = GetIdFromUrl(url);
url_entries_.erase(url_id);
}
} // namespace net
......@@ -70,6 +70,7 @@ class URLRequestThrottlerManager {
// which garbage collecting happens is adjustable with the
// kRequestBetweenCollecting constant.
void GarbageCollectEntriesIfNecessary();
// Method that does the actual work of garbage collecting.
void GarbageCollectEntries();
......