Commit d2283ac2 authored by mdempsky's avatar mdempsky Committed by Commit bot

Split AssembleFilter into comprehensible chunks

Previously SandboxBPF constructed the CodeGen Instruction sequence
primarily in one large, complex function.  Additionally, it made
extensive use of the CodeGen::JoinInstructions() function to
conditionally arrange various bits of instructions, which made it
harder to follow.

This CL splits the Instruction assembly code into 5 mostly distinct
functions and eliminates all use of JoinInstructions() in favor of
function composition.  E.g., instead of

    foo = gen->MakeInstruction(...);
    bar = gen->MakeInstruction(...);
    gen->JoinInstructions(foo, bar);

this CL favors writing

    MakeFoo(MakeBar())

with the convention that Instruction-constructing functions should
arrange for control to transfer to the Instruction sequence argument
when complete.  (I.e., "continuation-passing style":
http://en.wikipedia.org/wiki/Continuation-passing_style)

BUG=414363

Review URL: https://codereview.chromium.org/568053005

Cr-Commit-Position: refs/heads/master@{#295015}
parent 31e9fd65
...@@ -12,11 +12,13 @@ ...@@ -12,11 +12,13 @@
#include <errno.h> #include <errno.h>
#include <fcntl.h> #include <fcntl.h>
#include <signal.h>
#include <string.h> #include <string.h>
#include <sys/prctl.h> #include <sys/prctl.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/syscall.h> #include <sys/syscall.h>
#include <sys/types.h> #include <sys/types.h>
#include <sys/wait.h>
#include <time.h> #include <time.h>
#include <unistd.h> #include <unistd.h>
...@@ -28,9 +30,11 @@ ...@@ -28,9 +30,11 @@
#include "base/memory/scoped_ptr.h" #include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h" #include "base/posix/eintr_wrapper.h"
#include "sandbox/linux/seccomp-bpf/codegen.h" #include "sandbox/linux/seccomp-bpf/codegen.h"
#include "sandbox/linux/seccomp-bpf/errorcode.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h" #include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
#include "sandbox/linux/seccomp-bpf/syscall.h" #include "sandbox/linux/seccomp-bpf/syscall.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h" #include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/seccomp-bpf/trap.h"
#include "sandbox/linux/seccomp-bpf/verifier.h" #include "sandbox/linux/seccomp-bpf/verifier.h"
#include "sandbox/linux/services/linux_syscalls.h" #include "sandbox/linux/services/linux_syscalls.h"
...@@ -40,6 +44,28 @@ namespace { ...@@ -40,6 +44,28 @@ namespace {
const int kExpectedExitCode = 100; const int kExpectedExitCode = 100;
#if defined(__i386__) || defined(__x86_64__)
const bool kIsIntel = true;
#else
const bool kIsIntel = false;
#endif
#if defined(__x86_64__) && defined(__ILP32__)
const bool kIsX32 = true;
#else
const bool kIsX32 = false;
#endif
const int kSyscallsRequiredForUnsafeTraps[] = {
__NR_rt_sigprocmask,
__NR_rt_sigreturn,
#if defined(__NR_sigprocmask)
__NR_sigprocmask,
#endif
#if defined(__NR_sigreturn)
__NR_sigreturn,
#endif
};
bool HasExactlyOneBit(uint64_t x) { bool HasExactlyOneBit(uint64_t x) {
// Common trick; e.g., see http://stackoverflow.com/a/108329. // Common trick; e.g., see http://stackoverflow.com/a/108329.
return x != 0 && (x & (x - 1)) == 0; return x != 0 && (x & (x - 1)) == 0;
...@@ -627,41 +653,66 @@ SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) { ...@@ -627,41 +653,66 @@ SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) {
SANDBOX_DIE("Out of memory"); SANDBOX_DIE("Out of memory");
} }
bool has_unsafe_traps;
Instruction* head = CompilePolicy(gen, &has_unsafe_traps);
// Turn the DAG into a vector of instructions.
Program* program = new Program();
gen->Compile(head, program);
delete gen;
// Make sure compilation resulted in BPF program that executes
// correctly. Otherwise, there is an internal error in our BPF compiler.
// There is really nothing the caller can do until the bug is fixed.
if (force_verification) {
// Verification is expensive. We only perform this step, if we are
// compiled in debug mode, or if the caller explicitly requested
// verification.
VerifyProgram(*program, has_unsafe_traps);
}
return program;
}
Instruction* SandboxBPF::CompilePolicy(CodeGen* gen, bool* has_unsafe_traps) {
// A compiled policy consists of three logical parts:
// 1. Check that the "arch" field matches the expected architecture.
// 2. If the policy involves unsafe traps, check if the syscall was
// invoked by Syscall::Call, and then allow it unconditionally.
// 3. Check the system call number and jump to the appropriate compiled
// system call policy number.
return CheckArch(
gen, MaybeAddEscapeHatch(gen, has_unsafe_traps, DispatchSyscall(gen)));
}
Instruction* SandboxBPF::CheckArch(CodeGen* gen, Instruction* passed) {
// If the architecture doesn't match SECCOMP_ARCH, disallow the // If the architecture doesn't match SECCOMP_ARCH, disallow the
// system call. // system call.
Instruction* tail; return gen->MakeInstruction(
Instruction* head = gen->MakeInstruction(
BPF_LD + BPF_W + BPF_ABS, BPF_LD + BPF_W + BPF_ABS,
SECCOMP_ARCH_IDX, SECCOMP_ARCH_IDX,
tail = gen->MakeInstruction( gen->MakeInstruction(
BPF_JMP + BPF_JEQ + BPF_K, BPF_JMP + BPF_JEQ + BPF_K,
SECCOMP_ARCH, SECCOMP_ARCH,
NULL, passed,
gen->MakeInstruction( RetExpression(gen,
BPF_RET + BPF_K,
Kill("Invalid audit architecture in BPF filter")))); Kill("Invalid audit architecture in BPF filter"))));
}
bool has_unsafe_traps = false; Instruction* SandboxBPF::MaybeAddEscapeHatch(CodeGen* gen,
{ bool* has_unsafe_traps,
// Evaluate all possible system calls and group their ErrorCodes into Instruction* rest) {
// ranges of identical codes.
Ranges ranges;
FindRanges(&ranges);
// Compile the system call ranges to an optimized BPF jumptable
Instruction* jumptable =
AssembleJumpTable(gen, ranges.begin(), ranges.end());
// If there is at least one UnsafeTrap() in our program, the entire sandbox // If there is at least one UnsafeTrap() in our program, the entire sandbox
// is unsafe. We need to modify the program so that all non- // is unsafe. We need to modify the program so that all non-
// SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then // SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then
// allow us to temporarily disable sandboxing rules inside of callbacks to // allow us to temporarily disable sandboxing rules inside of callbacks to
// UnsafeTrap(). // UnsafeTrap().
gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps); *has_unsafe_traps = false;
gen->Traverse(rest, CheckForUnsafeErrorCodes, has_unsafe_traps);
// Grab the system call number, so that we can implement jump tables. if (!*has_unsafe_traps) {
Instruction* load_nr = // If no unsafe traps, then simply return |rest|.
gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX); return rest;
}
// If our BPF program has unsafe jumps, enable support for them. This // If our BPF program has unsafe jumps, enable support for them. This
// test happens very early in the BPF filter program. Even before we // test happens very early in the BPF filter program. Even before we
...@@ -670,31 +721,19 @@ SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) { ...@@ -670,31 +721,19 @@ SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) {
// measures that the sandbox provides, we print a big warning message -- // measures that the sandbox provides, we print a big warning message --
// and of course, we make sure to only ever enable this feature if it // and of course, we make sure to only ever enable this feature if it
// is actually requested by the sandbox policy. // is actually requested by the sandbox policy.
if (has_unsafe_traps) {
if (Syscall::Call(-1) == -1 && errno == ENOSYS) { if (Syscall::Call(-1) == -1 && errno == ENOSYS) {
SANDBOX_DIE( SANDBOX_DIE(
"Support for UnsafeTrap() has not yet been ported to this " "Support for UnsafeTrap() has not yet been ported to this "
"architecture"); "architecture");
} }
if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask) for (size_t i = 0; i < arraysize(kSyscallsRequiredForUnsafeTraps); ++i) {
.Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) || if (!policy_->EvaluateSyscall(this, kSyscallsRequiredForUnsafeTraps[i])
!policy_->EvaluateSyscall(this, __NR_rt_sigreturn) .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))) {
.Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#if defined(__NR_sigprocmask)
||
!policy_->EvaluateSyscall(this, __NR_sigprocmask)
.Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
#if defined(__NR_sigreturn)
||
!policy_->EvaluateSyscall(this, __NR_sigreturn)
.Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
) {
SANDBOX_DIE( SANDBOX_DIE(
"Invalid seccomp policy; if using UnsafeTrap(), you must " "Policies that use UnsafeTrap() must unconditionally allow all "
"unconditionally allow sigreturn() and sigprocmask()"); "required system calls");
}
} }
if (!Trap::EnableUnsafeTrapsInSigSysHandler()) { if (!Trap::EnableUnsafeTrapsInSigSysHandler()) {
...@@ -705,84 +744,74 @@ SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) { ...@@ -705,84 +744,74 @@ SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) {
// than enabling dangerous code. // than enabling dangerous code.
SANDBOX_DIE("We'd rather die than enable unsafe traps"); SANDBOX_DIE("We'd rather die than enable unsafe traps");
} }
gen->Traverse(jumptable, RedirectToUserspace, this); gen->Traverse(rest, RedirectToUserspace, this);
// Allow system calls, if they originate from our magic return address // Allow system calls, if they originate from our magic return address
// (which we can query by calling Syscall::Call(-1)). // (which we can query by calling Syscall::Call(-1)).
uintptr_t syscall_entry_point = static_cast<uintptr_t>(Syscall::Call(-1)); uint64_t syscall_entry_point =
static_cast<uint64_t>(static_cast<uintptr_t>(Syscall::Call(-1)));
uint32_t low = static_cast<uint32_t>(syscall_entry_point); uint32_t low = static_cast<uint32_t>(syscall_entry_point);
#if __SIZEOF_POINTER__ > 4
uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32); uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32);
#endif
// BPF cannot do native 64bit comparisons. On 64bit architectures, we // BPF cannot do native 64-bit comparisons, so we have to compare
// have to compare both 32bit halves of the instruction pointer. If they // both 32-bit halves of the instruction pointer. If they match what
// match what we expect, we return ERR_ALLOWED. If either or both don't // we expect, we return ERR_ALLOWED. If either or both don't match,
// match, we continue evaluating the rest of the sandbox policy. // we continue evaluating the rest of the sandbox policy.
Instruction* escape_hatch = gen->MakeInstruction( //
// For simplicity, we check the full 64-bit instruction pointer even
// on 32-bit architectures.
return gen->MakeInstruction(
BPF_LD + BPF_W + BPF_ABS, BPF_LD + BPF_W + BPF_ABS,
SECCOMP_IP_LSB_IDX, SECCOMP_IP_LSB_IDX,
gen->MakeInstruction( gen->MakeInstruction(
BPF_JMP + BPF_JEQ + BPF_K, BPF_JMP + BPF_JEQ + BPF_K,
low, low,
#if __SIZEOF_POINTER__ > 4
gen->MakeInstruction( gen->MakeInstruction(
BPF_LD + BPF_W + BPF_ABS, BPF_LD + BPF_W + BPF_ABS,
SECCOMP_IP_MSB_IDX, SECCOMP_IP_MSB_IDX,
gen->MakeInstruction( gen->MakeInstruction(
BPF_JMP + BPF_JEQ + BPF_K, BPF_JMP + BPF_JEQ + BPF_K,
hi, hi,
#endif RetExpression(gen, ErrorCode(ErrorCode::ERR_ALLOWED)),
gen->MakeInstruction(BPF_RET + BPF_K, rest)),
ErrorCode(ErrorCode::ERR_ALLOWED)), rest));
#if __SIZEOF_POINTER__ > 4 }
load_nr)),
#endif
load_nr));
gen->JoinInstructions(tail, escape_hatch);
} else {
gen->JoinInstructions(tail, load_nr);
}
tail = load_nr;
// On Intel architectures, verify that system call numbers are in the Instruction* SandboxBPF::DispatchSyscall(CodeGen* gen) {
// expected number range. The older i386 and x86-64 APIs clear bit 30 // Evaluate all possible system calls and group their ErrorCodes into
// on all system calls. The newer x32 API always sets bit 30. // ranges of identical codes.
#if defined(__i386__) || defined(__x86_64__) Ranges ranges;
Instruction* invalidX32 = gen->MakeInstruction( FindRanges(&ranges);
BPF_RET + BPF_K, Kill("Illegal mixing of system call ABIs").err_);
Instruction* checkX32 =
#if defined(__x86_64__) && defined(__ILP32__)
gen->MakeInstruction(
BPF_JMP + BPF_JSET + BPF_K, 0x40000000, 0, invalidX32);
#else
gen->MakeInstruction(
BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, 0);
#endif
gen->JoinInstructions(tail, checkX32);
tail = checkX32;
#endif
// Append jump table to our pre-amble // Compile the system call ranges to an optimized BPF jumptable
gen->JoinInstructions(tail, jumptable); Instruction* jumptable = AssembleJumpTable(gen, ranges.begin(), ranges.end());
}
// Turn the DAG into a vector of instructions. // Grab the system call number, so that we can check it and then
Program* program = new Program(); // execute the jump table.
gen->Compile(head, program); return gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS,
delete gen; SECCOMP_NR_IDX,
CheckSyscallNumber(gen, jumptable));
}
// Make sure compilation resulted in BPF program that executes Instruction* SandboxBPF::CheckSyscallNumber(CodeGen* gen, Instruction* passed) {
// correctly. Otherwise, there is an internal error in our BPF compiler. if (kIsIntel) {
// There is really nothing the caller can do until the bug is fixed. // On Intel architectures, verify that system call numbers are in the
if (force_verification) { // expected number range.
// Verification is expensive. We only perform this step, if we are Instruction* invalidX32 =
// compiled in debug mode, or if the caller explicitly requested RetExpression(gen, Kill("Illegal mixing of system call ABIs"));
// verification. if (kIsX32) {
VerifyProgram(*program, has_unsafe_traps); // The newer x32 API always sets bit 30.
return gen->MakeInstruction(
BPF_JMP + BPF_JSET + BPF_K, 0x40000000, passed, invalidX32);
} else {
// The older i386 and x86-64 APIs clear bit 30 on all system calls.
return gen->MakeInstruction(
BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, passed);
}
} }
return program; // TODO(mdempsky): Similar validation for other architectures?
return passed;
} }
void SandboxBPF::VerifyProgram(const Program& program, bool has_unsafe_traps) { void SandboxBPF::VerifyProgram(const Program& program, bool has_unsafe_traps) {
...@@ -1028,16 +1057,12 @@ ErrorCode SandboxBPF::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) { ...@@ -1028,16 +1057,12 @@ ErrorCode SandboxBPF::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) {
} }
bool SandboxBPF::IsRequiredForUnsafeTrap(int sysno) { bool SandboxBPF::IsRequiredForUnsafeTrap(int sysno) {
return (sysno == __NR_rt_sigprocmask || sysno == __NR_rt_sigreturn for (size_t i = 0; i < arraysize(kSyscallsRequiredForUnsafeTraps); ++i) {
#if defined(__NR_sigprocmask) if (sysno == kSyscallsRequiredForUnsafeTraps[i]) {
|| return true;
sysno == __NR_sigprocmask }
#endif }
#if defined(__NR_sigreturn) return false;
||
sysno == __NR_sigreturn
#endif
);
} }
intptr_t SandboxBPF::ForwardSyscall(const struct arch_seccomp_data& args) { intptr_t SandboxBPF::ForwardSyscall(const struct arch_seccomp_data& args) {
......
...@@ -246,6 +246,37 @@ class SANDBOX_EXPORT SandboxBPF { ...@@ -246,6 +246,37 @@ class SANDBOX_EXPORT SandboxBPF {
// been configured with SetSandboxPolicy(). // been configured with SetSandboxPolicy().
void InstallFilter(bool must_sync_threads); void InstallFilter(bool must_sync_threads);
// Compile the configured policy into a complete instruction sequence.
// (See MaybeAddEscapeHatch for |has_unsafe_traps|.)
Instruction* CompilePolicy(CodeGen* gen, bool* has_unsafe_traps);
// Return an instruction sequence that checks the
// arch_seccomp_data's "arch" field is valid, and then passes
// control to |passed| if so.
Instruction* CheckArch(CodeGen* gen, Instruction* passed);
// If the |rest| instruction sequence contains any unsafe traps,
// then sets |*has_unsafe_traps| to true and returns an instruction
// sequence that allows all system calls from Syscall::Call(), and
// otherwise passes control to |rest|.
//
// If |rest| contains no unsafe traps, then |rest| is returned
// directly and |*has_unsafe_traps| is set to false.
Instruction* MaybeAddEscapeHatch(CodeGen* gen,
bool* has_unsafe_traps,
Instruction* rest);
// Return an instruction sequence that loads and checks the system
// call number, performs a binary search, and then dispatches to an
// appropriate instruction sequence compiled from the current
// policy.
Instruction* DispatchSyscall(CodeGen* gen);
// Return an instruction sequence that checks the system call number
// (expected to be loaded in register A) and if valid, passes
// control to |passed| (with register A still valid).
Instruction* CheckSyscallNumber(CodeGen* gen, Instruction* passed);
// Verify the correctness of a compiled program by comparing it against the // Verify the correctness of a compiled program by comparing it against the
// current policy. This function should only ever be called by unit tests and // current policy. This function should only ever be called by unit tests and
// by the sandbox internals. It should not be used by production code. // by the sandbox internals. It should not be used by production code.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment