Commit ecbbe049 authored by bradnelson's avatar bradnelson Committed by Commit bot

Fix two errors with dup2.

Fixing incorrect handling of out of range descriptors passed as newfd to dup2.

Fixing incorrect free descriptor management when using dup2 on a previously
allocated descriptor value.

Replacing a heap with a std::set, to allow descriptor deletion from the free
set in O(lg n).

Dropping the impossible case of calling dup2 with a null handle.

TEST=Added two nacl_io_tests.
BUG=None
R=binji@chromium.org

Review URL: https://codereview.chromium.org/849743002

Cr-Commit-Position: refs/heads/master@{#311355}
parent b342dc70
......@@ -220,12 +220,12 @@ int KernelObject::AllocateFD(const ScopedKernelHandle& handle,
std::string abs_path = GetAbsParts(path).Join();
Descriptor_t descriptor(handle, abs_path);
// If we can recycle and FD, use that first
// If we can recycle an FD, use that first
if (free_fds_.size()) {
id = free_fds_.front();
// Force lower numbered FD to be available first.
std::pop_heap(free_fds_.begin(), free_fds_.end(), std::greater<int>());
free_fds_.pop_back();
// std::set is ordered, so begin() returns the lowest value.
id = *free_fds_.begin();
free_fds_.erase(id);
handle_map_[id] = descriptor;
} else {
id = handle_map_.size();
......@@ -238,38 +238,33 @@ int KernelObject::AllocateFD(const ScopedKernelHandle& handle,
// Re-points descriptor |fd| at |handle| with the given (absolute) |path|,
// growing the handle map when |fd| lies beyond its current size and adding
// every newly created intermediate slot (but not |fd| itself) to the free
// descriptor pool.
// NOTE(review): this span is a rendered diff with removed and added lines
// interleaved. The first body (NULL-handle branch, heap-based free list)
// is the removed revision; the second body (assert, std::set free list)
// is the added revision. Only the second body exists in the committed
// code — confirm against the upstream file before editing.
void KernelObject::FreeAndReassignFD(int fd,
                                     const ScopedKernelHandle& handle,
                                     const std::string& path) {
  // --- removed revision begins here ---
  if (NULL == handle) {
    FreeFD(fd);
  } else {
    AUTO_LOCK(handle_lock_);
    // If the required FD is larger than the current set, grow the set.
    int sz = static_cast<int>(handle_map_.size());
    if (fd >= sz) {
      // Expand the handle map to include all the extra descriptors
      // up to and including fd.
      handle_map_.resize(fd + 1);
      // Add all the new descriptors, except fd, to the free list.
      for (; sz < fd; ++sz) {
        free_fds_.push_back(sz);
        std::push_heap(free_fds_.begin(), free_fds_.end(),
                       std::greater<int>());
      }
    }
    // --- added revision begins here ---
    AUTO_LOCK(handle_lock_);
    // This path will be from an existing handle, and absolute.
    handle_map_[fd] = Descriptor_t(handle, path);
    // If the required FD is larger than the current set, grow the set.
    int sz = static_cast<int>(handle_map_.size());
    if (fd >= sz) {
      // Expand the handle map to include all the extra descriptors
      // up to and including fd.
      handle_map_.resize(fd + 1);
      // Add all the new descriptors, except fd, to the free list.
      for (; sz < fd; ++sz) {
        free_fds_.insert(sz);
      }
    }
    // A null handle is impossible on this path (callers dup an existing
    // descriptor), so assert rather than branching on it.
    assert(handle != NULL);
    // fd may have been sitting in the free pool from an earlier close;
    // it is in use again now, so remove it (O(lg n) with std::set).
    free_fds_.erase(fd);
    // This path will be from an existing handle, and absolute.
    handle_map_[fd] = Descriptor_t(handle, path);
}
// Releases descriptor |fd|: drops its handle reference and returns the
// slot to the free pool so AllocateFD can hand it out again.
// NOTE(review): this span is a rendered diff — the push_back/push_heap
// lines are the removed heap-based revision and the insert() line is the
// added std::set revision; only insert() exists in the committed code.
void KernelObject::FreeFD(int fd) {
  AUTO_LOCK(handle_lock_);
  // Drop the reference so the underlying kernel handle can be destroyed.
  handle_map_[fd].handle.reset(NULL);
  // --- removed revision ---
  free_fds_.push_back(fd);
  // Force lower numbered FD to be available first.
  std::push_heap(free_fds_.begin(), free_fds_.end(), std::greater<int>());
  // --- added revision: std::set keeps the pool ordered automatically ---
  free_fds_.insert(fd);
}
} // namespace nacl_io
......@@ -8,6 +8,7 @@
#include <pthread.h>
#include <map>
#include <set>
#include <string>
#include <vector>
......@@ -104,7 +105,7 @@ class KernelObject {
private:
std::string cwd_;
mode_t umask_;
std::vector<int> free_fds_;
std::set<int> free_fds_;
HandleMap_t handle_map_;
FsMap_t filesystems_;
......
......@@ -279,6 +279,11 @@ int KernelProxy::dup2(int oldfd, int newfd) {
if (oldfd == newfd)
return newfd;
if (newfd < 0) {
errno = EBADF;
return -1;
}
ScopedKernelHandle old_handle;
std::string old_path;
Error error = AcquireHandleAndPath(oldfd, &old_handle, &old_path);
......
......@@ -527,6 +527,53 @@ TEST_F(KernelProxyTest, Dup) {
// fd, new_fd, dup_fd -> "/bar"
}
TEST_F(KernelProxyTest, DescriptorDup2Dance) {
  // Acquire a descriptor to copy around for this test. The test assumes
  // that the descriptors open by default are contiguous from zero.
  const int base = ki_open("/foo", O_CREAT | O_RDWR, 0777);
  ASSERT_GT(base, -1);
  const int second = base + 1;
  const int third = base + 2;

  // The trailing comment on each step lists which descriptors are
  // allocated once the step completes.
  ASSERT_EQ(second, ki_dup2(base, second));  // base, second
  ASSERT_EQ(0, ki_close(second));            // base
  ASSERT_EQ(second, ki_dup2(base, second));  // base, second
  ASSERT_EQ(third, ki_dup(base));            // base, second, third
  ASSERT_EQ(0, ki_close(third));             // base, second
  ASSERT_EQ(0, ki_close(second));            // base
  ASSERT_EQ(0, ki_close(base));              // (none)
}
TEST_F(KernelProxyTest, Dup2Negative) {
  // Acquire a valid descriptor to copy for this test. The test assumes
  // that the descriptors open by default are contiguous from zero.
  const int fd = ki_open("/foo", O_CREAT | O_RDWR, 0777);
  ASSERT_GT(fd, -1);

  // Duplicating onto a negative descriptor is invalid: the call must
  // fail and report EBADF.
  ASSERT_EQ(-1, ki_dup2(fd, -12));
  EXPECT_EQ(EBADF, errno);

  ASSERT_EQ(0, ki_close(fd));
}
TEST_F(KernelProxyTest, DescriptorAllocationConsistency) {
// Check that the descriptor free list returns the expected ones,
// as the order is mandated by POSIX.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment