Commit 4b9e9b62 authored by Brian White, committed by Commit Bot

Do histogram validation when processing Renderer death.

There are hints that the corruption being observed happens around
the death of a renderer.  Add (temporary) instrumentation inside the
processing of this event to see if this can be narrowed down further.

Bug: 744734
Change-Id: I63b2bfeaa96d2e8f8f299cd2c48cca130e72a8ec
Reviewed-on: https://chromium-review.googlesource.com/655922
Reviewed-by: Alexei Svitkine <asvitkine@chromium.org>
Commit-Queue: Brian White <bcwhite@chromium.org>
Cr-Commit-Position: refs/heads/master@{#500443}
parent 8f602bfc
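
For readers skimming the diff below: the change wraps base::StatisticsRecorder::ValidateAllHistograms() in a VALIDATE_ALL_HISTOGRAMS(x) macro and sprinkles numbered calls through RenderProcessHostImpl::Cleanup() and ProcessDied(), so that if validation fails, the crash identifies which checkpoint in the teardown sequence first saw the corruption. Below is a minimal, self-contained C++ sketch of that numbered-checkpoint pattern only; the Histogram struct, the magic-value check, and the ENABLE_HISTOGRAM_VALIDATION switch are illustrative stand-ins, not the actual StatisticsRecorder implementation (which this change does not show).

// Sketch of the numbered-checkpoint validation pattern (illustrative only).
#include <cstdio>
#include <cstdlib>
#include <vector>

namespace {

// Hypothetical stand-in for a histogram object whose memory might get
// trampled; the real base::Histogram layout is different.
constexpr unsigned kAliveMagic = 0xC001D00Du;

struct Histogram {
  unsigned magic = kAliveMagic;  // overwritten memory would change this
  const char* name = "";
};

std::vector<Histogram>& AllHistograms() {
  static std::vector<Histogram> histograms(3);
  return histograms;
}

// Stand-in for base::StatisticsRecorder::ValidateAllHistograms(). The
// |identifier| encodes the call site, so a crash report shows *where* in
// the teardown sequence corruption was first detected.
void ValidateAllHistograms(int identifier) {
  for (const Histogram& histogram : AllHistograms()) {
    if (histogram.magic != kAliveMagic) {
      std::fprintf(stderr, "histogram corruption at checkpoint %d (%s)\n",
                   identifier, histogram.name);
      std::abort();  // the real code would surface the id in the crash dump
    }
  }
}

}  // namespace

// Enable the checkpoints for this sketch; the actual change keys this on
// OS_ANDROID instead and compiles the macro away on other platforms.
#define ENABLE_HISTOGRAM_VALIDATION 1

#if defined(ENABLE_HISTOGRAM_VALIDATION)
#define VALIDATE_ALL_HISTOGRAMS(x) ValidateAllHistograms(x)
#else
#define VALIDATE_ALL_HISTOGRAMS(x)
#endif

void Cleanup() {
  VALIDATE_ALL_HISTOGRAMS(61);  // checkpoint before teardown begins
  // ... teardown steps would run here ...
  VALIDATE_ALL_HISTOGRAMS(69);  // checkpoint after the last risky step
}

int main() {
  Cleanup();  // passes silently while no histogram memory is trampled
  return 0;
}

Making the macro expand to nothing outside the guarded build keeps the instrumentation zero-cost for populations where the corruption is not being chased, and makes it easy to strip once crbug/736675 is resolved, as the TODO in the diff notes.
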
@@ -262,6 +262,16 @@
 #define IntToStringType base::IntToString
 #endif
+// This is temporary to try to locate a core trampler.
+// TODO(bcwhite): Remove when crbug/736675 is resolved.
+#if defined(OS_ANDROID)
+#include "base/metrics/statistics_recorder.h"
+#define VALIDATE_ALL_HISTOGRAMS(x) \
+  base::StatisticsRecorder::ValidateAllHistograms(x)
+#else
+#define VALIDATE_ALL_HISTOGRAMS(x)
+#endif
 namespace content {
 namespace {
@@ -2986,6 +2996,8 @@ void RenderProcessHostImpl::Cleanup() {
   }
   delayed_cleanup_needed_ = false;
+  VALIDATE_ALL_HISTOGRAMS(61);
   // Records the time when the process starts kept alive by the ref count for
   // UMA.
   if (listeners_.IsEmpty() && keep_alive_ref_count_ > 0 &&
@@ -3013,6 +3025,8 @@ void RenderProcessHostImpl::Cleanup() {
   DCHECK_EQ(0, pending_views_);
+  VALIDATE_ALL_HISTOGRAMS(62);
   // If the process associated with this RenderProcessHost is still alive,
   // notify all observers that the process has exited cleanly, even though it
   // will be destroyed a bit later. Observers shouldn't rely on this process
@@ -3023,12 +3037,17 @@ void RenderProcessHostImpl::Cleanup() {
           this, base::TERMINATION_STATUS_NORMAL_TERMINATION, 0);
     }
   }
+  VALIDATE_ALL_HISTOGRAMS(63);
   for (auto& observer : observers_)
     observer.RenderProcessHostDestroyed(this);
   NotificationService::current()->Notify(
       NOTIFICATION_RENDERER_PROCESS_TERMINATED,
       Source<RenderProcessHost>(this), NotificationService::NoDetails());
+  VALIDATE_ALL_HISTOGRAMS(64);
   if (connection_filter_id_ !=
       ServiceManagerConnection::kInvalidConnectionFilterId) {
     ServiceManagerConnection* service_manager_connection =
@@ -3037,6 +3056,8 @@ void RenderProcessHostImpl::Cleanup() {
     service_manager_connection->RemoveConnectionFilter(connection_filter_id_);
     connection_filter_id_ =
         ServiceManagerConnection::kInvalidConnectionFilterId;
+    VALIDATE_ALL_HISTOGRAMS(65);
   }
 #ifndef NDEBUG
@@ -3045,6 +3066,8 @@ void RenderProcessHostImpl::Cleanup() {
   base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, this);
   deleting_soon_ = true;
+  VALIDATE_ALL_HISTOGRAMS(66);
   // It's important not to wait for the DeleteTask to delete the channel
   // proxy. Kill it off now. That way, in case the profile is going away, the
   // rest of the objects attached to this RenderProcessHost start going
@@ -3052,16 +3075,22 @@ void RenderProcessHostImpl::Cleanup() {
   // OnChannelClosed() to IPC::ChannelProxy::Context on the IO thread.
   ResetChannelProxy();
+  VALIDATE_ALL_HISTOGRAMS(67);
   // Its important to remove the kSessionStorageHolder after the channel
   // has been reset to avoid deleting the underlying namespaces prior
   // to processing ipcs referring to them.
   DCHECK(!channel_);
   RemoveUserData(kSessionStorageHolderKey);
+  VALIDATE_ALL_HISTOGRAMS(68);
   // Remove ourself from the list of renderer processes so that we can't be
   // reused in between now and when the Delete task runs.
   UnregisterHost(GetID());
+  VALIDATE_ALL_HISTOGRAMS(69);
   instance_weak_factory_.reset(
       new base::WeakPtrFactory<RenderProcessHostImpl>(this));
 }
@@ -3624,6 +3653,7 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
   // It should not be possible for a process death notification to come in while
   // we are dying.
   DCHECK(!deleting_soon_);
+  VALIDATE_ALL_HISTOGRAMS(10);
   // child_process_launcher_ can be NULL in single process mode or if fast
   // termination happened.
@@ -3648,6 +3678,8 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
   RendererClosedDetails details(status, exit_code);
+  VALIDATE_ALL_HISTOGRAMS(20);
   child_process_launcher_.reset();
   is_dead_ = true;
   if (route_provider_binding_.is_bound())
@@ -3658,6 +3690,8 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
   UpdateProcessPriority();
+  VALIDATE_ALL_HISTOGRAMS(30);
   within_process_died_observer_ = true;
   NotificationService::current()->Notify(
       NOTIFICATION_RENDERER_PROCESS_CLOSED, Source<RenderProcessHost>(this),
@@ -3668,6 +3702,8 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
   RemoveUserData(kSessionStorageHolderKey);
+  VALIDATE_ALL_HISTOGRAMS(40);
   base::IDMap<IPC::Listener*>::iterator iter(&listeners_);
   while (!iter.IsAtEnd()) {
     iter.GetCurrentValue()->OnMessageReceived(FrameHostMsg_RenderProcessGone(
@@ -3675,6 +3711,8 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
     iter.Advance();
   }
+  VALIDATE_ALL_HISTOGRAMS(50);
   // Initialize a new ChannelProxy in case this host is re-used for a new
   // process. This ensures that new messages can be sent on the host ASAP (even
   // before Init()) and they'll eventually reach the new process.
@@ -3682,6 +3720,8 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
   // Note that this may have already been called by one of the above observers
   EnableSendQueue();
+  VALIDATE_ALL_HISTOGRAMS(60);
   // It's possible that one of the calls out to the observers might have caused
   // this object to be no longer needed.
   if (delayed_cleanup_needed_)
@@ -3698,6 +3738,7 @@ void RenderProcessHostImpl::ProcessDied(bool already_dead,
   // This object is not deleted at this point and might be reused later.
   // TODO(darin): clean this up
+  VALIDATE_ALL_HISTOGRAMS(99);
 }
 size_t RenderProcessHost::GetActiveViewCount() {