diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index c3a85599e13..1a1ec9a0417 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -13,11 +13,14 @@ jobs: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 + - name: install deps + run: | + sudo apt -q update + sudo apt -q install libseccomp-dev - uses: golangci/golangci-lint-action@v2 with: # must be specified without patch version version: v1.31 - # Only show new issues for a pull request. only-new-issues: true diff --git a/Dockerfile b/Dockerfile index 1b38390eab6..f5ce90c0efe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,6 +23,7 @@ RUN echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debi crossbuild-essential-ppc64el \ curl \ gawk \ + gcc \ iptables \ jq \ kmod \ diff --git a/Vagrantfile.centos7 b/Vagrantfile.centos7 index 037ecd769b0..4dcfa4bca61 100644 --- a/Vagrantfile.centos7 +++ b/Vagrantfile.centos7 @@ -22,7 +22,7 @@ Vagrant.configure("2") do |config| # install yum packages yum install -y -q epel-release (cd /etc/yum.repos.d && curl -O https://copr.fedorainfracloud.org/coprs/adrian/criu-el7/repo/epel-7/adrian-criu-el7-epel-7.repo) - yum install -y -q gcc git iptables jq libseccomp-devel make skopeo criu + yum install -y -q gcc git iptables jq glibc-static libseccomp-devel make skopeo criu yum clean all # install Go diff --git a/Vagrantfile.fedora33 b/Vagrantfile.fedora33 index a32bed4a07e..dc5fc30c9c6 100644 --- a/Vagrantfile.fedora33 +++ b/Vagrantfile.fedora33 @@ -21,7 +21,7 @@ Vagrant.configure("2") do |config| config exclude kernel,kernel-core config install_weak_deps false update -install iptables gcc make golang-go libseccomp-devel bats jq git-core criu skopeo +install iptables gcc make golang-go glibc-static libseccomp-devel bats jq git-core criu skopeo ts run EOF done diff --git a/go.mod b/go.mod index f475b5ccdf9..61859160e83 100644 --- a/go.mod +++ b/go.mod @@ -23,5 +23,6 @@ require ( github.com/urfave/cli v1.22.1 github.com/vishvananda/netlink v1.1.0 github.com/willf/bitset v1.1.11 - golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf + golang.org/x/net v0.0.0-20201224014010-6772e930b67b + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 ) diff --git a/go.sum b/go.sum index e5ed617ef1a..f80183c88af 100644 --- a/go.sum +++ b/go.sum @@ -62,14 +62,19 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7Zo github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf h1:kt3wY1Lu5MJAnKTfoMR52Cu4gwvna4VTzNOiT8tY73s= -golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/libcontainer/seccomp/patchbpf/enosys_linux.go b/libcontainer/seccomp/patchbpf/enosys_linux.go new file mode 100644 index 00000000000..cdbd59da095 --- /dev/null +++ b/libcontainer/seccomp/patchbpf/enosys_linux.go @@ -0,0 +1,628 @@ +// +build linux,cgo,seccomp + +package patchbpf + +import ( + "encoding/binary" + "io" + "os" + "runtime" + "unsafe" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/utils" + + "github.com/pkg/errors" + libseccomp "github.com/seccomp/libseccomp-golang" + "github.com/sirupsen/logrus" + "golang.org/x/net/bpf" + "golang.org/x/sys/unix" +) + +// #cgo pkg-config: libseccomp +/* +#include +#include +#include +#include + +const uint32_t C_ACT_ERRNO_ENOSYS = SCMP_ACT_ERRNO(ENOSYS); + +// Copied from . + +#ifndef SECCOMP_SET_MODE_FILTER +# define SECCOMP_SET_MODE_FILTER 1 +#endif +const uintptr_t C_SET_MODE_FILTER = SECCOMP_SET_MODE_FILTER; + +#ifndef SECCOMP_FILTER_FLAG_LOG +# define SECCOMP_FILTER_FLAG_LOG (1UL << 1) +#endif +const uintptr_t C_FILTER_FLAG_LOG = SECCOMP_FILTER_FLAG_LOG; + +// We use the AUDIT_ARCH_* values because those are the ones used by the kernel +// and SCMP_ARCH_* sometimes has fake values (such as SCMP_ARCH_X32). But we +// use so we get libseccomp's fallback definitions of AUDIT_ARCH_*. 
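+// (Note: the kernel evaluates this filter against struct seccomp_data, i.e.
+//   struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; };
+// so the syscall number lives at offset 0 and the AUDIT_ARCH_* value at
+// offset 4 -- those are the two fields the -ENOSYS stub below loads with
+// LoadAbsolute and compares against the constants defined here.)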
+ +const uint32_t C_AUDIT_ARCH_I386 = AUDIT_ARCH_I386; +const uint32_t C_AUDIT_ARCH_X86_64 = AUDIT_ARCH_X86_64; +const uint32_t C_AUDIT_ARCH_ARM = AUDIT_ARCH_ARM; +const uint32_t C_AUDIT_ARCH_AARCH64 = AUDIT_ARCH_AARCH64; +const uint32_t C_AUDIT_ARCH_MIPS = AUDIT_ARCH_MIPS; +const uint32_t C_AUDIT_ARCH_MIPS64 = AUDIT_ARCH_MIPS64; +const uint32_t C_AUDIT_ARCH_MIPS64N32 = AUDIT_ARCH_MIPS64N32; +const uint32_t C_AUDIT_ARCH_MIPSEL = AUDIT_ARCH_MIPSEL; +const uint32_t C_AUDIT_ARCH_MIPSEL64 = AUDIT_ARCH_MIPSEL64; +const uint32_t C_AUDIT_ARCH_MIPSEL64N32 = AUDIT_ARCH_MIPSEL64N32; +const uint32_t C_AUDIT_ARCH_PPC = AUDIT_ARCH_PPC; +const uint32_t C_AUDIT_ARCH_PPC64 = AUDIT_ARCH_PPC64; +const uint32_t C_AUDIT_ARCH_PPC64LE = AUDIT_ARCH_PPC64LE; +const uint32_t C_AUDIT_ARCH_S390 = AUDIT_ARCH_S390; +const uint32_t C_AUDIT_ARCH_S390X = AUDIT_ARCH_S390X; +*/ +import "C" + +var retErrnoEnosys = uint32(C.C_ACT_ERRNO_ENOSYS) + +func isAllowAction(action configs.Action) bool { + switch action { + // Trace is considered an "allow" action because a good tracer should + // support future syscalls (by handling -ENOSYS on its own), and giving + // -ENOSYS will be disruptive for emulation. + case configs.Allow, configs.Log, configs.Trace: + return true + default: + return false + } +} + +func parseProgram(rdr io.Reader) ([]bpf.RawInstruction, error) { + var program []bpf.RawInstruction +loop: + for { + // Read the next instruction. We have to use NativeEndian because + // seccomp_export_bpf outputs the program in *host* endian-ness. + var insn unix.SockFilter + if err := binary.Read(rdr, utils.NativeEndian, &insn); err != nil { + switch err { + case io.EOF: + // Parsing complete. + break loop + case io.ErrUnexpectedEOF: + // Parsing stopped mid-instruction. + return nil, errors.Wrap(err, "program parsing halted mid-instruction") + default: + // All other errors. + return nil, errors.Wrap(err, "parsing instructions") + } + } + program = append(program, bpf.RawInstruction{ + Op: insn.Code, + Jt: insn.Jt, + Jf: insn.Jf, + K: insn.K, + }) + } + return program, nil +} + +func disassembleFilter(filter *libseccomp.ScmpFilter) ([]bpf.Instruction, error) { + rdr, wtr, err := os.Pipe() + if err != nil { + return nil, errors.Wrap(err, "creating scratch pipe") + } + defer wtr.Close() + defer rdr.Close() + + if err := filter.ExportBPF(wtr); err != nil { + return nil, errors.Wrap(err, "exporting BPF") + } + // Close so that the reader actually gets EOF. + _ = wtr.Close() + + // Parse the instructions. + rawProgram, err := parseProgram(rdr) + if err != nil { + return nil, errors.Wrap(err, "parsing generated BPF filter") + } + program, ok := bpf.Disassemble(rawProgram) + if !ok { + return nil, errors.Errorf("could not disassemble entire BPF filter") + } + return program, nil +} + +type nativeArch uint32 + +const invalidArch nativeArch = 0 + +func archToNative(arch libseccomp.ScmpArch) (nativeArch, error) { + switch arch { + case libseccomp.ArchNative: + // Convert to actual native architecture. + arch, err := libseccomp.GetNativeArch() + if err != nil { + return invalidArch, errors.Wrap(err, "get native arch") + } + return archToNative(arch) + case libseccomp.ArchX86: + return nativeArch(C.C_AUDIT_ARCH_I386), nil + case libseccomp.ArchAMD64, libseccomp.ArchX32: + // NOTE: x32 is treated like x86_64 except all x32 syscalls have the + // 30th bit of the syscall number set to indicate that it's not a + // normal x86_64 syscall. 
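+		// For example, getpid(2) is syscall 39 on x86_64 but 0x40000027
+		// (39 | 1<<30) when made through the x32 ABI, even though both run
+		// under AUDIT_ARCH_X86_64 -- the generated stub keys off that bit.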
+ return nativeArch(C.C_AUDIT_ARCH_X86_64), nil + case libseccomp.ArchARM: + return nativeArch(C.C_AUDIT_ARCH_ARM), nil + case libseccomp.ArchARM64: + return nativeArch(C.C_AUDIT_ARCH_AARCH64), nil + case libseccomp.ArchMIPS: + return nativeArch(C.C_AUDIT_ARCH_MIPS), nil + case libseccomp.ArchMIPS64: + return nativeArch(C.C_AUDIT_ARCH_MIPS64), nil + case libseccomp.ArchMIPS64N32: + return nativeArch(C.C_AUDIT_ARCH_MIPS64N32), nil + case libseccomp.ArchMIPSEL: + return nativeArch(C.C_AUDIT_ARCH_MIPSEL), nil + case libseccomp.ArchMIPSEL64: + return nativeArch(C.C_AUDIT_ARCH_MIPSEL64), nil + case libseccomp.ArchMIPSEL64N32: + return nativeArch(C.C_AUDIT_ARCH_MIPSEL64N32), nil + case libseccomp.ArchPPC: + return nativeArch(C.C_AUDIT_ARCH_PPC), nil + case libseccomp.ArchPPC64: + return nativeArch(C.C_AUDIT_ARCH_PPC64), nil + case libseccomp.ArchPPC64LE: + return nativeArch(C.C_AUDIT_ARCH_PPC64LE), nil + case libseccomp.ArchS390: + return nativeArch(C.C_AUDIT_ARCH_S390), nil + case libseccomp.ArchS390X: + return nativeArch(C.C_AUDIT_ARCH_S390X), nil + default: + return invalidArch, errors.Errorf("unknown architecture: %v", arch) + } +} + +type lastSyscallMap map[nativeArch]map[libseccomp.ScmpArch]libseccomp.ScmpSyscall + +// Figure out largest syscall number referenced in the filter for each +// architecture. We will be generating code based on the native architecture +// representation, but SCMP_ARCH_X32 means we have to track cases where the +// same architecture has different largest syscalls based on the mode. +func findLastSyscalls(config *configs.Seccomp) (lastSyscallMap, error) { + lastSyscalls := make(lastSyscallMap) + // Only loop over architectures which are present in the filter. Any other + // architectures will get the libseccomp bad architecture action anyway. + for _, ociArch := range config.Architectures { + arch, err := libseccomp.GetArchFromString(ociArch) + if err != nil { + return nil, errors.Wrap(err, "validating seccomp architecture") + } + + // Map native architecture to a real architecture value to avoid + // doubling-up the lastSyscall mapping. + if arch == libseccomp.ArchNative { + nativeArch, err := libseccomp.GetNativeArch() + if err != nil { + return nil, errors.Wrap(err, "get native arch") + } + arch = nativeArch + } + + // Figure out native architecture representation of the architecture. + nativeArch, err := archToNative(arch) + if err != nil { + return nil, errors.Wrapf(err, "cannot map architecture %v to AUDIT_ARCH_ constant", arch) + } + + if _, ok := lastSyscalls[nativeArch]; !ok { + lastSyscalls[nativeArch] = map[libseccomp.ScmpArch]libseccomp.ScmpSyscall{} + } + if _, ok := lastSyscalls[nativeArch][arch]; ok { + // Because of ArchNative we may hit the same entry multiple times. + // Just skip it if we've seen this (nativeArch, ScmpArch) + // combination before. + continue + } + + // Find the largest syscall in the filter for this architecture. + var largestSyscall libseccomp.ScmpSyscall + for _, rule := range config.Syscalls { + sysno, err := libseccomp.GetSyscallFromNameByArch(rule.Name, arch) + if err != nil { + // Ignore unknown syscalls. + continue + } + if sysno > largestSyscall { + largestSyscall = sysno + } + } + if largestSyscall != 0 { + lastSyscalls[nativeArch][arch] = largestSyscall + } else { + logrus.Warnf("could not find any syscalls for arch %s", ociArch) + delete(lastSyscalls[nativeArch], arch) + } + } + return lastSyscalls, nil +} + +// FIXME FIXME FIXME +// +// This solution is less than ideal. 
In the future it would be great to have +// per-arch information about which syscalls were added in which kernel +// versions so we can create far more accurate filter rules (handling holes in +// the syscall table and determining -ENOSYS requirements based on kernel +// minimum version alone. +// +// This implementation can in principle cause issues with syscalls like +// close_range(2) which were added out-of-order in the syscall table between +// kernel releases. +func generateEnosysStub(lastSyscalls lastSyscallMap) ([]bpf.Instruction, error) { + // A jump-table for each nativeArch used to generate the initial + // conditional jumps -- measured from the *END* of the program so they + // remain valid after prepending to the tail. + archJumpTable := map[nativeArch]uint32{} + + // Generate our own -ENOSYS rules for each architecture. They have to be + // generated in reverse (prepended to the tail of the program) because the + // JumpIf jumps need to be computed from the end of the program. + programTail := []bpf.Instruction{ + // Fall-through rules jump into the filter. + bpf.Jump{Skip: 1}, + // Rules which jump to here get -ENOSYS. + bpf.RetConstant{Val: retErrnoEnosys}, + } + + // Generate the syscall -ENOSYS rules. + for nativeArch, maxSyscalls := range lastSyscalls { + // The number of instructions from the tail of this section which need + // to be jumped in order to reach the -ENOSYS return. If the section + // does not jump, it will fall through to the actual filter. + baseJumpEnosys := uint32(len(programTail) - 1) + baseJumpFilter := baseJumpEnosys + 1 + + // Add the load instruction for the syscall number -- we jump here + // directly from the arch code so we need to do it here. Sadly we can't + // share this code between architecture branches. + section := []bpf.Instruction{ + // load [0] + bpf.LoadAbsolute{Off: 0, Size: 4}, // NOTE: We assume sizeof(int) == 4. + } + + switch len(maxSyscalls) { + case 0: + // No syscalls found for this arch -- skip it and move on. + continue + case 1: + // Get the only syscall in the map. + var sysno libseccomp.ScmpSyscall + for _, no := range maxSyscalls { + sysno = no + } + + // The simplest case just boils down to a single jgt instruction, + // with special handling if baseJumpEnosys is larger than 255 (and + // thus a long jump is required). + var sectionTail []bpf.Instruction + if baseJumpEnosys+1 <= 255 { + sectionTail = []bpf.Instruction{ + // jgt [syscall],[baseJumpEnosys+1] + bpf.JumpIf{ + Cond: bpf.JumpGreaterThan, + Val: uint32(sysno), + SkipTrue: uint8(baseJumpEnosys + 1)}, + // ja [baseJumpFilter] + bpf.Jump{Skip: baseJumpFilter}, + } + } else { + sectionTail = []bpf.Instruction{ + // jle [syscall],1 + bpf.JumpIf{Cond: bpf.JumpLessOrEqual, Val: uint32(sysno), SkipTrue: 1}, + // ja [baseJumpEnosys+1] + bpf.Jump{Skip: baseJumpEnosys + 1}, + // ja [baseJumpFilter] + bpf.Jump{Skip: baseJumpFilter}, + } + } + + // If we're on x86 we need to add a check for x32 and if we're in + // the wrong mode we jump over the section. + if uint32(nativeArch) == uint32(C.C_AUDIT_ARCH_X86_64) { + // Grab the only architecture in the map. + var scmpArch libseccomp.ScmpArch + for arch := range maxSyscalls { + scmpArch = arch + } + + // Generate a prefix to check the mode. + switch scmpArch { + case libseccomp.ArchAMD64: + sectionTail = append([]bpf.Instruction{ + // jset (1<<30),[len(tail)-1] + bpf.JumpIf{Cond: bpf.JumpBitsSet, + Val: 1 << 30, + SkipTrue: uint8(len(sectionTail) - 1)}, + }, sectionTail...) 
+				case libseccomp.ArchX32:
+					sectionTail = append([]bpf.Instruction{
+						// jset (1<<30),0,[len(tail)-1]
+						bpf.JumpIf{Cond: bpf.JumpBitsNotSet,
+							Val:      1 << 30,
+							SkipTrue: uint8(len(sectionTail) - 1)},
+					}, sectionTail...)
+				default:
+					return nil, errors.Errorf("unknown amd64 native architecture %#x", scmpArch)
+				}
+			}
+
+			section = append(section, sectionTail...)
+		case 2:
+			// x32 and x86_64 are a unique case, we can't handle any others.
+			if uint32(nativeArch) != uint32(C.C_AUDIT_ARCH_X86_64) {
+				return nil, errors.Errorf("unknown architecture overlap on native arch %#x", nativeArch)
+			}
+
+			x32sysno, ok := maxSyscalls[libseccomp.ArchX32]
+			if !ok {
+				return nil, errors.Errorf("missing %v in overlapping x86_64 arch: %v", libseccomp.ArchX32, maxSyscalls)
+			}
+			x86sysno, ok := maxSyscalls[libseccomp.ArchAMD64]
+			if !ok {
+				return nil, errors.Errorf("missing %v in overlapping x86_64 arch: %v", libseccomp.ArchAMD64, maxSyscalls)
+			}
+
+			// The x32 ABI indicates that a syscall is being made by an x32
+			// process by setting the 30th bit of the syscall number, but we
+			// need to do some special-casing depending on whether we need to
+			// do long jumps.
+			if baseJumpEnosys+2 <= 255 {
+				// For the simple case we want to have something like:
+				//   jset (1<<30),1
+				//   jgt [x86 syscall],[baseJumpEnosys+2],1
+				//   jgt [x32 syscall],[baseJumpEnosys+1]
+				//   ja [baseJumpFilter]
+				section = append(section, []bpf.Instruction{
+					// jset (1<<30),1
+					bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 1 << 30, SkipTrue: 1},
+					// jgt [x86 syscall],[baseJumpEnosys+2],1
+					bpf.JumpIf{
+						Cond:     bpf.JumpGreaterThan,
+						Val:      uint32(x86sysno),
+						SkipTrue: uint8(baseJumpEnosys + 2), SkipFalse: 1},
+					// jgt [x32 syscall],[baseJumpEnosys+1]
+					bpf.JumpIf{
+						Cond:     bpf.JumpGreaterThan,
+						Val:      uint32(x32sysno),
+						SkipTrue: uint8(baseJumpEnosys + 1)},
+					// ja [baseJumpFilter]
+					bpf.Jump{Skip: baseJumpFilter},
+				}...)
+			} else {
+				// But if the [baseJumpEnosys+2] jump is larger than 255 we
+				// need to do a long jump like so:
+				//   jset (1<<30),1
+				//   jgt [x86 syscall],1,2
+				//   jle [x32 syscall],1
+				//   ja [baseJumpEnosys+1]
+				//   ja [baseJumpFilter]
+				section = append(section, []bpf.Instruction{
+					// jset (1<<30),1
+					bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 1 << 30, SkipTrue: 1},
+					// jgt [x86 syscall],1,2
+					bpf.JumpIf{
+						Cond:     bpf.JumpGreaterThan,
+						Val:      uint32(x86sysno),
+						SkipTrue: 1, SkipFalse: 2},
+					// jle [x32 syscall],1
+					bpf.JumpIf{
+						Cond:     bpf.JumpLessOrEqual,
+						Val:      uint32(x32sysno),
+						SkipTrue: 1},
+					// ja [baseJumpEnosys+1]
+					bpf.Jump{Skip: baseJumpEnosys + 1},
+					// ja [baseJumpFilter]
+					bpf.Jump{Skip: baseJumpFilter},
+				}...)
+			}
+		default:
+			return nil, errors.Errorf("invalid number of architecture overlaps: %v", len(maxSyscalls))
+		}
+
+		// Prepend this section to the tail.
+		programTail = append(section, programTail...)
+
+		// Update jump table.
+		archJumpTable[nativeArch] = uint32(len(programTail))
+	}
+
+	// Add a dummy "jump to filter" for any architecture we might miss below.
+	// Such architectures will probably get the BadArch action of the filter
+	// regardless.
+	programTail = append([]bpf.Instruction{
+		// ja [end of stub and start of filter]
+		bpf.Jump{Skip: uint32(len(programTail))},
+	}, programTail...)
+
+	// Generate the jump rules for each architecture. This has to be done in
+	// reverse as well for the same reason as above. We add to programTail
+	// directly because the jumps are impacted by each architecture rule we add
+	// as well.
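To make the generated layout concrete, here is a rough sketch (illustrative only, not part of the patch) of the stub that the code above would conceptually produce for a configuration whose only architecture is SCMP_ARCH_X86_64 and whose largest known syscall number is 300; the real jump offsets are computed backwards from the program tail as described above:

	stub := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 4, Size: 4},                             // A = seccomp_data.arch
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0xC000003E, SkipTrue: 1}, // AUDIT_ARCH_X86_64?
		bpf.Jump{Skip: 5},                                             // unknown arch -> let the libseccomp filter decide
		bpf.LoadAbsolute{Off: 0, Size: 4},                             // A = seccomp_data.nr
		bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 1 << 30, SkipTrue: 1},  // x32 syscall -> user filter
		bpf.JumpIf{Cond: bpf.JumpGreaterThan, Val: 300, SkipTrue: 1},  // newer than anything in the config?
		bpf.Jump{Skip: 1},                                             // no -> fall through to the user filter
		bpf.RetConstant{Val: retErrnoEnosys},                          // yes -> SCMP_ACT_ERRNO(ENOSYS), i.e. 0x00050026
		// ...libseccomp-generated filter follows here...
	}
	_ = stub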
+ // + // TODO: Maybe we want to optimise to avoid long jumps here? So sort the + // architectures based on how large the jumps are going to be, or + // re-sort the candidate architectures each time to make sure that we + // pick the largest jump which is going to be smaller than 255. + for nativeArch := range lastSyscalls { + // We jump forwards but the jump table is calculated from the *END*. + jump := uint32(len(programTail)) - archJumpTable[nativeArch] + + // Same routine as above -- this is a basic jeq check, complicated + // slightly if it turns out that we need to do a long jump. + if jump <= 255 { + programTail = append([]bpf.Instruction{ + // jeq [arch],[jump] + bpf.JumpIf{ + Cond: bpf.JumpEqual, + Val: uint32(nativeArch), + SkipTrue: uint8(jump)}, + }, programTail...) + } else { + programTail = append([]bpf.Instruction{ + // jne [arch],1 + bpf.JumpIf{ + Cond: bpf.JumpNotEqual, + Val: uint32(nativeArch), + SkipTrue: 1}, + // ja [jump] + bpf.Jump{Skip: jump}, + }, programTail...) + } + } + + // Prepend the load instruction for the architecture. + programTail = append([]bpf.Instruction{ + // load [4] + bpf.LoadAbsolute{Off: 4, Size: 4}, // NOTE: We assume sizeof(int) == 4. + }, programTail...) + + // And that's all folks! + return programTail, nil +} + +func assemble(program []bpf.Instruction) ([]unix.SockFilter, error) { + rawProgram, err := bpf.Assemble(program) + if err != nil { + return nil, errors.Wrap(err, "assembling program") + } + + // Convert to []unix.SockFilter for unix.SockFilter. + var filter []unix.SockFilter + for _, insn := range rawProgram { + filter = append(filter, unix.SockFilter{ + Code: insn.Op, + Jt: insn.Jt, + Jf: insn.Jf, + K: insn.K, + }) + } + return filter, nil +} + +func generatePatch(config *configs.Seccomp) ([]bpf.Instruction, error) { + // We only add the stub if the default action is not permissive. + if isAllowAction(config.DefaultAction) { + logrus.Debugf("seccomp: skipping -ENOSYS stub filter generation") + return nil, nil + } + + lastSyscalls, err := findLastSyscalls(config) + if err != nil { + return nil, errors.Wrap(err, "finding last syscalls for -ENOSYS stub") + } + stubProgram, err := generateEnosysStub(lastSyscalls) + if err != nil { + return nil, errors.Wrap(err, "generating -ENOSYS stub") + } + return stubProgram, nil +} + +func enosysPatchFilter(config *configs.Seccomp, filter *libseccomp.ScmpFilter) ([]unix.SockFilter, error) { + program, err := disassembleFilter(filter) + if err != nil { + return nil, errors.Wrap(err, "disassembling original filter") + } + + patch, err := generatePatch(config) + if err != nil { + return nil, errors.Wrap(err, "generating patch for filter") + } + fullProgram := append(patch, program...) + + logrus.Debugf("seccomp: prepending -ENOSYS stub filter to user filter...") + for idx, insn := range patch { + logrus.Debugf(" [%4.1d] %s", idx, insn) + } + logrus.Debugf(" [....] --- original filter ---") + + fprog, err := assemble(fullProgram) + if err != nil { + return nil, errors.Wrap(err, "assembling modified filter") + } + return fprog, nil +} + +func filterFlags(filter *libseccomp.ScmpFilter) (flags uint, noNewPrivs bool, err error) { + // Ignore the error since pre-2.4 libseccomp is treated as API level 0. 
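+	// (API level 3 is the level that introduced the SCMP_FLTATR_CTL_LOG
+	// attribute, which is why the GetLogBit query below is gated on
+	// apiLevel >= 3.)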
+	apiLevel, _ := libseccomp.GetApi()
+
+	noNewPrivs, err = filter.GetNoNewPrivsBit()
+	if err != nil {
+		return 0, false, errors.Wrap(err, "fetch no_new_privs filter bit")
+	}
+
+	if apiLevel >= 3 {
+		if logBit, err := filter.GetLogBit(); err != nil {
+			return 0, false, errors.Wrap(err, "fetch SECCOMP_FILTER_FLAG_LOG bit")
+		} else if logBit {
+			flags |= uint(C.C_FILTER_FLAG_LOG)
+		}
+	}
+
+	// TODO: Support seccomp flags not yet added to libseccomp-golang...
+	return
+}
+
+func sysSeccompSetFilter(flags uint, filter []unix.SockFilter) (err error) {
+	fprog := unix.SockFprog{
+		Len:    uint16(len(filter)),
+		Filter: &filter[0],
+	}
+	// If no seccomp flags were requested we can use the old-school prctl(2).
+	if flags == 0 {
+		err = unix.Prctl(unix.PR_SET_SECCOMP,
+			unix.SECCOMP_MODE_FILTER,
+			uintptr(unsafe.Pointer(&fprog)), 0, 0)
+	} else {
+		_, _, errno := unix.RawSyscall(unix.SYS_SECCOMP,
+			uintptr(C.C_SET_MODE_FILTER),
+			uintptr(flags), uintptr(unsafe.Pointer(&fprog)))
+		if errno != 0 {
+			err = errno
+		}
+	}
+	runtime.KeepAlive(filter)
+	runtime.KeepAlive(fprog)
+	return
+}
+
+// PatchAndLoad takes a seccomp configuration and a libseccomp filter which has
+// been pre-configured with the set of rules in the seccomp config. It then
+// patches said filter to handle -ENOSYS in a much nicer manner than
+// libseccomp's default action behaviour, and loads the patched filter into
+// the kernel for the current process.
+func PatchAndLoad(config *configs.Seccomp, filter *libseccomp.ScmpFilter) error {
+	// Generate a patched filter.
+	fprog, err := enosysPatchFilter(config, filter)
+	if err != nil {
+		return errors.Wrap(err, "patching filter")
+	}
+
+	// Get the set of libseccomp flags set.
+	seccompFlags, noNewPrivs, err := filterFlags(filter)
+	if err != nil {
+		return errors.Wrap(err, "fetch seccomp filter flags")
+	}
+
+	// Set no_new_privs if it was requested, though in runc we handle
+	// no_new_privs separately so warn if we hit this path.
+	if noNewPrivs {
+		logrus.Warnf("potentially misconfigured filter -- setting no_new_privs in seccomp path")
+		if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+			return errors.Wrap(err, "enable no_new_privs bit")
+		}
+	}
+
+	// Finally, load the filter.
+	if err := sysSeccompSetFilter(seccompFlags, fprog); err != nil {
+		return errors.Wrap(err, "loading seccomp filter")
+	}
+	return nil
+}
diff --git a/libcontainer/seccomp/patchbpf/enosys_linux_test.go b/libcontainer/seccomp/patchbpf/enosys_linux_test.go
new file mode 100644
index 00000000000..17b92af9586
--- /dev/null
+++ b/libcontainer/seccomp/patchbpf/enosys_linux_test.go
@@ -0,0 +1,280 @@
+// +build linux,cgo,seccomp
+
+package patchbpf
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"testing"
+
+	"github.com/opencontainers/runc/libcontainer/configs"
+
+	libseccomp "github.com/seccomp/libseccomp-golang"
+	"golang.org/x/net/bpf"
+)
+
+type seccompData struct {
+	Syscall uint32 // NOTE: We assume sizeof(int) == 4.
+	Arch    uint32
+	IP      uint64
+	Args    [6]uint64
+}
+
+// mockSyscallPayload creates a fake seccomp_data struct with the given data.
+func mockSyscallPayload(t *testing.T, sysno libseccomp.ScmpSyscall, arch nativeArch, args ...uint64) []byte {
+	var buf bytes.Buffer
+
+	data := seccompData{
+		Syscall: uint32(sysno),
+		Arch:    uint32(arch),
+		IP:      0xDEADBEEFCAFE,
+	}
+
+	copy(data.Args[:], args)
+	if len(args) > 6 {
+		t.Fatalf("bad syscall payload: linux only supports 6-argument syscalls")
+	}
+
+	// NOTE: We use BigEndian here because golang.org/x/net/bpf assumes that
+	// all payloads are big-endian while seccomp uses host endianness.
+	if err := binary.Write(&buf, binary.BigEndian, data); err != nil {
+		t.Fatalf("bad syscall payload: cannot write data: %v", err)
+	}
+	return buf.Bytes()
+}
+
+// retFallthrough is returned by the mockFilter. If the mock filter returns
+// this value, it indicates "fallthrough to libseccomp-generated filter".
+const retFallthrough uint32 = 0xDEADBEEF
+
+// mockFilter returns a BPF VM that contains a mock filter with an -ENOSYS
+// stub. If the filter returns retFallthrough, the stub filter has permitted
+// the syscall to pass.
+func mockFilter(t *testing.T, config *configs.Seccomp) (*bpf.VM, []bpf.Instruction) {
+	patch, err := generatePatch(config)
+	if err != nil {
+		t.Fatalf("mock filter: generate enosys patch: %v", err)
+	}
+
+	program := append(patch, bpf.RetConstant{Val: retFallthrough})
+
+	vm, err := bpf.NewVM(program)
+	if err != nil {
+		t.Fatalf("mock filter: compile BPF VM: %v", err)
+	}
+	return vm, program
+}
+
+// fakeConfig generates a fake libcontainer seccomp configuration. The syscalls
+// are added with an action distinct from the default action.
+func fakeConfig(defaultAction configs.Action, explicitSyscalls []string, arches []string) *configs.Seccomp {
+	config := configs.Seccomp{
+		DefaultAction: defaultAction,
+		Architectures: arches,
+	}
+	syscallAction := configs.Allow
+	if syscallAction == defaultAction {
+		syscallAction = configs.Kill
+	}
+	for _, syscall := range explicitSyscalls {
+		config.Syscalls = append(config.Syscalls, &configs.Syscall{
+			Name:   syscall,
+			Action: syscallAction,
+		})
+	}
+	return &config
+}
+
+// List copied from .
+var testArches = []string{
+	"x86",
+	"amd64",
+	"x32",
+	"arm",
+	"arm64",
+	"mips",
+	"mips64",
+	"mips64n32",
+	"mipsel",
+	"mipsel64",
+	"mipsel64n32",
+	"ppc",
+	"ppc64",
+	"ppc64le",
+	"s390",
+	"s390x",
+}
+
+func archStringToNative(arch string) (nativeArch, error) {
+	scmpArch, err := libseccomp.GetArchFromString(arch)
+	if err != nil {
+		return 0, fmt.Errorf("unknown architecture %q: %v", arch, err)
+	}
+	return archToNative(scmpArch)
+}
+
+func testEnosysStub(t *testing.T, defaultAction configs.Action, arches []string) {
+	explicitSyscalls := []string{
+		"setns",
+		"kcmp",
+		"renameat2",
+		"copy_file_range",
+	}
+
+	implicitSyscalls := []string{
+		"clone",
+		"openat",
+		"read",
+		"write",
+	}
+
+	futureSyscalls := []libseccomp.ScmpSyscall{1000, 7331}
+
+	// Quick lookups for which arches are enabled.
+	archSet := map[string]bool{}
+	for _, arch := range arches {
+		archSet[arch] = true
+	}
+
+	for _, test := range []struct {
+		start, end int
+	}{
+		{0, 1}, // [setns]
+		{0, 2}, // [setns, kcmp]
+		{1, 2}, // [kcmp]
+		{1, 3}, // [kcmp, renameat2]
+		{1, 4}, // [kcmp, renameat2, copy_file_range]
+		{3, 4}, // [copy_file_range]
+	} {
+		allowedSyscalls := explicitSyscalls[test.start:test.end]
+		config := fakeConfig(defaultAction, allowedSyscalls, arches)
+		filter, program := mockFilter(t, config)
+
+		// The syscalls are in increasing order of newness, so all syscalls
+		// after the last allowed syscall will give -ENOSYS.
+		enosysStart := test.end
+
+		for _, arch := range testArches {
+			type syscallTest struct {
+				syscall  string
+				sysno    libseccomp.ScmpSyscall
+				expected int
+			}
+
+			scmpArch, err := libseccomp.GetArchFromString(arch)
+			if err != nil {
+				t.Fatalf("unknown libseccomp architecture %q: %v", arch, err)
+			}
+
+			nativeArch, err := archToNative(scmpArch)
+			if err != nil {
+				t.Fatalf("unknown audit architecture %q: %v", arch, err)
+			}
+
+			var syscallTests []syscallTest
+
+			// Add explicit syscalls (whether they will return -ENOSYS
+			// depends on the filter rules).
+			for idx, syscall := range explicitSyscalls {
+				expected := int(retFallthrough)
+				if idx >= enosysStart {
+					expected = int(retErrnoEnosys)
+				}
+				sysno, err := libseccomp.GetSyscallFromNameByArch(syscall, scmpArch)
+				if err != nil {
+					t.Fatalf("unknown syscall %q on arch %q: %v", syscall, arch, err)
+				}
+				syscallTests = append(syscallTests, syscallTest{
+					syscall,
+					sysno,
+					expected,
+				})
+			}
+
+			// Add implicit syscalls.
+			for _, syscall := range implicitSyscalls {
+				sysno, err := libseccomp.GetSyscallFromNameByArch(syscall, scmpArch)
+				if err != nil {
+					t.Fatalf("unknown syscall %q on arch %q: %v", syscall, arch, err)
+				}
+				syscallTests = append(syscallTests, syscallTest{
+					sysno:    sysno,
+					syscall:  syscall,
+					expected: int(retFallthrough),
+				})
+			}
+
+			// Add future syscalls.
+			for _, sysno := range futureSyscalls {
+				baseSysno, err := libseccomp.GetSyscallFromNameByArch("copy_file_range", scmpArch)
+				if err != nil {
+					t.Fatalf("unknown syscall 'copy_file_range' on arch %q: %v", arch, err)
+				}
+				sysno += baseSysno
+
+				syscallTests = append(syscallTests, syscallTest{
+					sysno:    sysno,
+					syscall:  fmt.Sprintf("syscall_%#x", sysno),
+					expected: int(retErrnoEnosys),
+				})
+			}
+
+			// Test all of the syscalls collected above.
+			for _, test := range syscallTests {
+				// Override the expected value in the two special cases.
+ if !archSet[arch] || isAllowAction(defaultAction) { + test.expected = int(retFallthrough) + } + + payload := mockSyscallPayload(t, test.sysno, nativeArch, 0x1337, 0xF00BA5) + ret, err := filter.Run(payload) + if err != nil { + t.Fatalf("error running filter: %v", err) + } + if ret != test.expected { + t.Logf("mock filter for %v %v:", arches, allowedSyscalls) + for idx, insn := range program { + t.Logf(" [%4.1d] %s", idx, insn) + } + t.Logf("payload: %#v", payload) + t.Errorf("filter %s(%d) %q(%d): got %#x, want %#x", arch, nativeArch, test.syscall, test.sysno, ret, test.expected) + } + } + } + } +} + +var testActions = map[string]configs.Action{ + "allow": configs.Allow, + "log": configs.Log, + "errno": configs.Errno, + "kill": configs.Kill, +} + +func TestEnosysStub_SingleArch(t *testing.T) { + for _, arch := range testArches { + arches := []string{arch} + t.Run("arch="+arch, func(t *testing.T) { + for name, action := range testActions { + t.Run("action="+name, func(t *testing.T) { + testEnosysStub(t, action, arches) + }) + } + }) + } +} + +func TestEnosysStub_MultiArch(t *testing.T) { + for end := 0; end < len(testArches); end++ { + for start := 0; start < end; start++ { + arches := testArches[start:end] + if len(arches) <= 1 { + continue + } + for _, action := range testActions { + testEnosysStub(t, action, arches) + } + } + } +} diff --git a/libcontainer/seccomp/patchbpf/enosys_unsupported.go b/libcontainer/seccomp/patchbpf/enosys_unsupported.go new file mode 100644 index 00000000000..3312fd655e7 --- /dev/null +++ b/libcontainer/seccomp/patchbpf/enosys_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux !cgo !seccomp + +package patchbpf + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + + libseccomp "github.com/seccomp/libseccomp-golang" +) + +func PatchAndLoad(config *configs.Seccomp, filter *libseccomp.ScmpFilter) error { + if config != nil { + return errors.New("cannot patch and load seccomp filter without runc seccomp support") + } + return nil +} diff --git a/libcontainer/seccomp/seccomp_linux.go b/libcontainer/seccomp/seccomp_linux.go index 2370a5be664..5e7b365c563 100644 --- a/libcontainer/seccomp/seccomp_linux.go +++ b/libcontainer/seccomp/seccomp_linux.go @@ -10,8 +10,9 @@ import ( "strings" "github.com/opencontainers/runc/libcontainer/configs" - libseccomp "github.com/seccomp/libseccomp-golang" + "github.com/opencontainers/runc/libcontainer/seccomp/patchbpf" + libseccomp "github.com/seccomp/libseccomp-golang" "golang.org/x/sys/unix" ) @@ -54,7 +55,6 @@ func InitSeccomp(config *configs.Seccomp) error { if err != nil { return fmt.Errorf("error validating Seccomp architecture: %s", err) } - if err := filter.AddArch(scmpArch); err != nil { return fmt.Errorf("error adding architecture to seccomp filter: %s", err) } @@ -70,16 +70,13 @@ func InitSeccomp(config *configs.Seccomp) error { if call == nil { return errors.New("encountered nil syscall while initializing Seccomp") } - - if err = matchCall(filter, call); err != nil { + if err := matchCall(filter, call); err != nil { return err } } - - if err = filter.Load(); err != nil { + if err := patchbpf.PatchAndLoad(config, filter); err != nil { return fmt.Errorf("error loading seccomp filter into kernel: %s", err) } - return nil } @@ -190,7 +187,7 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error { // Unconditional match - just add the rule if len(call.Args) == 0 { - if err = filter.AddRule(callNum, callAct); err != nil { + if err := filter.AddRule(callNum, callAct); err != 
nil { return fmt.Errorf("error adding seccomp filter rule for syscall %s: %s", call.Name, err) } } else { @@ -224,14 +221,14 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error { for _, cond := range conditions { condArr := []libseccomp.ScmpCondition{cond} - if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil { + if err := filter.AddRuleConditional(callNum, callAct, condArr); err != nil { return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err) } } } else { // No conditions share same argument // Use new, proper behavior - if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil { + if err := filter.AddRuleConditional(callNum, callAct, conditions); err != nil { return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err) } } diff --git a/libcontainer/specconv/spec_linux.go b/libcontainer/specconv/spec_linux.go index 07161ae0e52..6d646c58d83 100644 --- a/libcontainer/specconv/spec_linux.go +++ b/libcontainer/specconv/spec_linux.go @@ -847,6 +847,11 @@ func SetupSeccomp(config *specs.LinuxSeccomp) (*configs.Seccomp, error) { return nil, nil } + // We don't currently support seccomp flags. + if len(config.Flags) != 0 { + return nil, fmt.Errorf("seccomp flags are not yet supported by runc") + } + newConfig := new(configs.Seccomp) newConfig.Syscalls = []*configs.Syscall{} diff --git a/libcontainer/utils/utils.go b/libcontainer/utils/utils.go index 6966ac5e2de..1b72b7a1c1b 100644 --- a/libcontainer/utils/utils.go +++ b/libcontainer/utils/utils.go @@ -1,11 +1,13 @@ package utils import ( + "encoding/binary" "encoding/json" "io" "os" "path/filepath" "strings" + "unsafe" "golang.org/x/sys/unix" ) @@ -14,6 +16,20 @@ const ( exitSignalOffset = 128 ) +// NativeEndian is the native byte order of the host system. +var NativeEndian binary.ByteOrder + +func init() { + // Copied from . + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } +} + // ResolveRootfs ensures that the current working directory is // not a symlink and returns the absolute path to the rootfs func ResolveRootfs(uncleanRootfs string) (string, error) { diff --git a/tests/integration/config.json b/tests/integration/config.json deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/seccomp.bats b/tests/integration/seccomp.bats new file mode 100644 index 00000000000..267c6837ab9 --- /dev/null +++ b/tests/integration/seccomp.bats @@ -0,0 +1,24 @@ +#!/usr/bin/env bats + +load helpers + +function setup() { + teardown_busybox + setup_busybox +} + +function teardown() { + teardown_busybox +} + +@test "runc run [seccomp -ENOSYS handling]" { + TEST_NAME="seccomp_syscall_test1" + + # Compile the test binary and update the config to run it. 
+ gcc -static -o rootfs/seccomp_test "${TESTDATA}/${TEST_NAME}.c" + update_config ".linux.seccomp = $(<"${TESTDATA}/${TEST_NAME}.json")" + update_config '.process.args = ["/seccomp_test"]' + + runc run test_busybox + [ "$status" -eq 0 ] +} diff --git a/tests/integration/testdata/seccomp_syscall_test1.c b/tests/integration/testdata/seccomp_syscall_test1.c new file mode 100644 index 00000000000..d62598a209d --- /dev/null +++ b/tests/integration/testdata/seccomp_syscall_test1.c @@ -0,0 +1,79 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int exit_code = 0; + +/* + * We need raw wrappers around each syscall so that glibc won't rewrite the + * errno value when it is returned from the seccomp filter (glibc has a habit + * of hiding -ENOSYS if possible -- which counters what we're trying to test). + */ +#define raw(name, ...) \ + syscall(SYS_ ## name, ##__VA_ARGS__) + +#define syscall_assert(sval, rval) \ + do { \ + int L = (sval), R = (rval); \ + if (L < 0) \ + L = -errno; \ + if (L != R) { \ + printf("syscall_assert(%s == %s) failed: %d != %d\n", #sval, #rval, L, R); \ + exit_code = 32; \ + } \ + } while (0) + +int main(void) +{ + // Basic permitted syscalls. + syscall_assert(write(-1, NULL, 0), -EBADF); + + // Basic syscall with masked rules. + syscall_assert(raw(socket, AF_UNIX, SOCK_STREAM, 0x000), 3); + syscall_assert(raw(socket, AF_UNIX, SOCK_STREAM, 0x0FF), -EPROTONOSUPPORT); + syscall_assert(raw(socket, AF_UNIX, SOCK_STREAM, 0x001), 4); + syscall_assert(raw(socket, AF_UNIX, SOCK_STREAM, 0x100), -EPERM); + syscall_assert(raw(socket, AF_UNIX, SOCK_STREAM, 0xC00), -EPERM); + + // Multiple arguments with OR rules. + syscall_assert(raw(process_vm_readv, 100, NULL, 0, NULL, 0, ~0), -EINVAL); + syscall_assert(raw(process_vm_readv, 9001, NULL, 0, NULL, 0, ~0), -EINVAL); + syscall_assert(raw(process_vm_readv, 0, NULL, 0, NULL, 0, ~0), -EPERM); + syscall_assert(raw(process_vm_readv, 0, NULL, 0, NULL, 0, ~0), -EPERM); + + // Multiple arguments with OR rules -- rule is ERRNO(-ENOANO). + syscall_assert(raw(process_vm_writev, 1337, NULL, 0, NULL, 0, ~0), -ENOANO); + syscall_assert(raw(process_vm_writev, 2020, NULL, 0, NULL, 0, ~0), -ENOANO); + syscall_assert(raw(process_vm_writev, 0, NULL, 0, NULL, 0, ~0), -EPERM); + syscall_assert(raw(process_vm_writev, 0, NULL, 0, NULL, 0, ~0), -EPERM); + + // Multiple arguments with AND rules. + syscall_assert(raw(kcmp, 0, 1337, 0, 0, 0), -ESRCH); + syscall_assert(raw(kcmp, 0, 0, 0, 0, 0), -EPERM); + syscall_assert(raw(kcmp, 500, 1337, 0, 0, 0), -EPERM); + syscall_assert(raw(kcmp, 500, 500, 0, 0, 0), -EPERM); + + // Multiple rules for the same syscall. + syscall_assert(raw(dup3, 0, -100, 0xFFFF), -EPERM); + syscall_assert(raw(dup3, 1, -100, 0xFFFF), -EINVAL); + syscall_assert(raw(dup3, 2, -100, 0xFFFF), -EPERM); + syscall_assert(raw(dup3, 3, -100, 0xFFFF), -EINVAL); + + // Explicitly denied syscalls (those in Linux 3.0) get -EPERM. + syscall_assert(raw(unshare, 0), -EPERM); + syscall_assert(raw(setns, 0, 0), -EPERM); + + // Out-of-bounds fake syscall. 
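+	// Syscall number 1000 is larger than any syscall named in the JSON
+	// profile on every configured architecture, so the prepended stub must
+	// return -ENOSYS here rather than the filter's default -EPERM -- which
+	// is exactly the behaviour the patchbpf package exists to provide.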
+ syscall_assert(syscall(1000, 0xDEADBEEF, 0xCAFEFEED, 0x1337), -ENOSYS); + + return exit_code; +} diff --git a/tests/integration/testdata/seccomp_syscall_test1.json b/tests/integration/testdata/seccomp_syscall_test1.json new file mode 100644 index 00000000000..c48ceae7e1d --- /dev/null +++ b/tests/integration/testdata/seccomp_syscall_test1.json @@ -0,0 +1,464 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32", + "SCMP_ARCH_X86_64", + "SCMP_ARCH_AARCH64", + "SCMP_ARCH_ARM" + ], + "syscalls": [ + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "accept", + "accept4", + "access", + "adjtimex", + "alarm", + "arch_prctl", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_adjtime", + "clock_adjtime64", + "clock_getres", + "clock_getres_time64", + "clock_gettime", + "clock_gettime64", + "clock_nanosleep", + "clock_nanosleep_time64", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "faccessat2", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futex_time64", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "io_pgetevents", + "io_pgetevents_time64", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "io_uring_enter", + "io_uring_register", + "io_uring_setup", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "membarrier", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "modify_ldt", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedreceive_time64", + "mq_timedsend", + "mq_timedsend_time64", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "openat2", + "pause", + "pidfd_open", + "pidfd_send_signal", + "pipe", + "pipe2", + "poll", + "ppoll", + "ppoll_time64", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "pselect6", + "pselect6_time64", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readlink", + 
"readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmmsg_time64", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rseq", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_sigtimedwait_time64", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_rr_get_interval_time64", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "semtimedop_time64", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigprocmask", + "sigreturn", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "statx", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timer_getoverrun", + "timer_gettime", + "timer_gettime64", + "timer_settime", + "timer_settime64", + "timerfd_create", + "timerfd_gettime", + "timerfd_gettime64", + "timerfd_settime", + "timerfd_settime64", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimensat_time64", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ] + }, + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "dup3" + ], + "args": [ + { + "index": 0, + "value": 1, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "dup3" + ], + "args": [ + { + "index": 0, + "value": 2, + "op": "SCMP_CMP_GT" + } + ] + }, + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "kcmp" + ], + "args": [ + { + "index": 0, + "value": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 1, + "value": 1337, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "process_vm_readv" + ], + "args": [ + { + "index": 0, + "value": 100, + "op": "SCMP_CMP_EQ" + }, + { + "index": 0, + "value": 9001, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "action": "SCMP_ACT_ERRNO", + "errnoRet": 55, + "names": [ + "process_vm_writev" + ], + "args": [ + { + "index": 0, + "value": 1337, + "op": "SCMP_CMP_EQ" + }, + { + "index": 0, + "value": 2020, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "clone" + ], + "args": [ + { + "index": 0, + "value": 2114060288, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "action": "SCMP_ACT_ALLOW", + "names": [ + "socket" + ], + "args": [ + { + "index": 2, + "value": 3840, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + } + ] +} + diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 
100644 index 00000000000..15167cd746c --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 00000000000..1c4577e9680 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 00000000000..15e21b18122 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 00000000000..12f3ee835af --- /dev/null +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,222 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. 
+// K can be RegX for JumpIfX +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. + ExtProto Extension = 0 + // ExtType returns the packet's type (skb->pkt_type in the kernel) + // + // TODO: better documentation. How nice an API do we want to + // provide for these esoteric extensions? + ExtType Extension = 4 + // ExtPayloadOffset returns the offset of the packet payload, or + // the first protocol header that the kernel does not know how to + // parse. + ExtPayloadOffset Extension = 52 + // ExtInterfaceIndex returns the index of the interface on which + // the packet was received. + ExtInterfaceIndex Extension = 8 + // ExtNetlinkAttr returns the netlink attribute of type X at + // offset A. + ExtNetlinkAttr Extension = 12 + // ExtNetlinkAttrNested returns the nested netlink attribute of + // type X at offset A. + ExtNetlinkAttrNested Extension = 16 + // ExtMark returns the packet's mark value. + ExtMark Extension = 20 + // ExtQueue returns the packet's assigned hardware queue. + ExtQueue Extension = 24 + // ExtLinkLayerType returns the packet's hardware address type + // (e.g. Ethernet, Infiniband). + ExtLinkLayerType Extension = 28 + // ExtRXHash returns the packets receive hash. + // + // TODO: figure out what this rxhash actually is. + ExtRXHash Extension = 32 + // ExtCPUID returns the ID of the CPU processing the current + // packet. + ExtCPUID Extension = 36 + // ExtVLANTag returns the packet's VLAN tag. + ExtVLANTag Extension = 44 + // ExtVLANTagPresent returns non-zero if the packet has a VLAN + // tag. + // + // TODO: I think this might be a lie: it reads bit 0x1000 of the + // VLAN header, which changed meaning in recent revisions of the + // spec - this extension may now return meaningless information. + ExtVLANTagPresent Extension = 48 + // ExtVLANProto returns 0x8100 if the frame has a VLAN header, + // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some + // other value if no VLAN information is present. + ExtVLANProto Extension = 60 + // ExtRand returns a uniformly random uint32. + ExtRand Extension = 56 +) + +// The following gives names to various bit patterns used in opcode construction. 
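As a concrete illustration (an editorial example, not part of the vendored file), assembling a "load word at absolute offset 4" followed by an unconditional return combines the class, addressing-mode and width bits defined below:

	raw, err := bpf.Assemble([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 4, Size: 4}, // Op = opClsLoadA|opAddrModeAbsolute|opLoadWidth4 = 0x20, K = 4
		bpf.RetConstant{Val: 0},           // Op = opClsReturn|opRetSrcConstant = 0x06
	})
	_, _ = raw, err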
+ +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU & opClsJump + opMaskOperand = 0x08 + opMaskOperator = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. + opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operand for ALU and Jump instructions +type opOperand uint16 + +// Supported operand sources. +const ( + opOperandConstant opOperand = iota << 3 + opOperandX +) + +// An jumpOp is a conditional jump condition. +type jumpOp uint16 + +// Supported jump conditions. +const ( + opJumpAlways jumpOp = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 00000000000..ae62feb5341 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. 
Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. The machine also has 16 scratch registers for +temporary storage, and an indirection register (register X) for +indirect memory access. All registers are 32 bits wide. + +Each run of a BPF program is given one packet, which is placed in the +VM's read-only "main memory". LoadAbsolute and LoadIndirect +instructions can fetch up to 32 bits at a time into register A for +examination. + +The goal of a BPF program is to produce and return a verdict (uint32), +which tells the kernel what to do with the packet. In the context of +packet filtering, the returned value is the number of bytes of the +packet to forward to userspace, or 0 to ignore the packet. Other +contexts like seccomp define their own return values. + +In order to simplify programs, attempts to read past the end of the +packet terminate the program execution with a verdict of 0 (ignore +packet). This means that the vast majority of BPF programs don't need +to do any explicit bounds checking. + +In addition to the bytes of the packet, some BPF programs have access +to extensions, which are essentially calls to kernel utility +functions. Currently, the only extensions supported by this package +are the Linux packet filter extensions. + +Examples + +This packet filter selects all ARP packets. + + bpf.Assemble([]bpf.Instruction{ + // Load "EtherType" field from the ethernet header. + bpf.LoadAbsolute{Off: 12, Size: 2}, + // Skip over the next instruction if EtherType is not ARP. + bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, + // Verdict is "send up to 4k of the packet to userspace." + bpf.RetConstant{Val: 4096}, + // Verdict is "ignore packet." + bpf.RetConstant{Val: 0}, + }) + +This packet filter captures a random 1% sample of traffic. + + bpf.Assemble([]bpf.Instruction{ + // Get a 32-bit random number from the Linux kernel. + bpf.LoadExtension{Num: bpf.ExtRand}, + // 1% dice roll? + bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, + // Capture. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + +*/ +package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go new file mode 100644 index 00000000000..3cffcaa014e --- /dev/null +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -0,0 +1,726 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// An Instruction is one instruction executed by the BPF virtual +// machine. +type Instruction interface { + // Assemble assembles the Instruction into a RawInstruction. + Assemble() (RawInstruction, error) +} + +// A RawInstruction is a raw BPF virtual machine instruction. +type RawInstruction struct { + // Operation to execute. + Op uint16 + // For conditional jump instructions, the number of instructions + // to skip if the condition is true/false. + Jt uint8 + Jf uint8 + // Constant parameter. The meaning depends on the Op. + K uint32 +} + +// Assemble implements the Instruction Assemble method. +func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } + +// Disassemble parses ri into an Instruction and returns it. If ri is +// not recognized by this package, ri itself is returned. 
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return ALUOpX{Op: op} + case opOperandConstant: + return ALUOpConstant{Op: op, Val: ri.K} + default: + return ri + } + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + switch op := jumpOp(ri.Op & opMaskOperator); op { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual, opJumpGT, opJumpGE, opJumpSet: + cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf) + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse} + case opOperandConstant: + return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse} + default: + return ri + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) { + var test JumpTest + + // Decode "fake" jump conditions that don't appear in machine code + // Ensures the Assemble -> Disassemble stage recreates the same instructions + // See https://github.com/golang/go/issues/18470 + if skipTrue == 0 { + switch op { + case opJumpEqual: + test = JumpNotEqual + case opJumpGT: + test = JumpLessOrEqual + case opJumpGE: + test = JumpLessThan + case opJumpSet: + test = JumpBitsNotSet + } + + return test, skipFalse, 0 + } + + switch op { + case opJumpEqual: + test = JumpEqual + case opJumpGT: + test = JumpGreaterThan + case opJumpGE: + test = JumpGreaterOrEqual + case opJumpSet: + test = JumpBitsSet + } + + return test, skipTrue, skipFalse +} + +// LoadConstant loads Val into register Dst. 
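Illustrative sketch (not part of the vendored sources): the jumpOpToTest decoding above means a classic `jeq` that only carries a false-branch offset comes back as JumpNotEqual with the skips exchanged, so kernel-style raw filters round-trip into idiomatic Instructions. The opcode values below are the standard classic-BPF encodings, shown only for illustration.

	package main

	import (
		"fmt"

		"golang.org/x/net/bpf"
	)

	func main() {
		// The classic "accept ARP" filter in raw form:
		//   ldh [12]; jeq #0x806,0,1; ret #4096; ret #0
		raw := []bpf.RawInstruction{
			{Op: 0x28, K: 12},            // BPF_LD|BPF_H|BPF_ABS
			{Op: 0x15, Jf: 1, K: 0x0806}, // BPF_JMP|BPF_JEQ|BPF_K, jt=0 jf=1
			{Op: 0x06, K: 4096},          // BPF_RET|BPF_K
			{Op: 0x06, K: 0},
		}
		insts, allDecoded := bpf.Disassemble(raw)
		// allDecoded is true; the jeq with jt==0 decodes to
		// JumpIf{Cond: JumpNotEqual, SkipTrue: 1}, matching the doc.go example.
		fmt.Println(allDecoded, insts)
	}
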
+type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the instruction in assembler notation. +func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// String returns the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. 
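Illustrative sketch of the LoadMemShift/LoadIndirect pairing described above (assumption: Ethernet framing, so the IPv4 header starts at offset 14; the EtherType/protocol checks a real filter would need are omitted for brevity):

	// Assumes import "golang.org/x/net/bpf".
	prog := []bpf.Instruction{
		// X = 4 * (packet[14] & 0x0f): the IPv4 header length in bytes.
		bpf.LoadMemShift{Off: 14},
		// A = big-endian uint16 at packet[X+16]: the TCP destination port.
		bpf.LoadIndirect{Off: 16, Size: 2},
		// Keep packets destined for port 22, drop everything else.
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 22, SkipFalse: 1},
		bpf.RetConstant{Val: 4096},
		bpf.RetConstant{Val: 0},
	}
	_, _ = bpf.Assemble(prog)
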
+type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. +func (a LoadExtension) Assemble() (RawInstruction, error) { + if a.Num == ExtLen { + return assembleLoad(RegA, 4, opAddrModePacketLen, 0) + } + return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) +} + +// String returns the instruction in assembler notation. +func (a LoadExtension) String() string { + switch a.Num { + case ExtLen: + return "ld #len" + case ExtProto: + return "ld #proto" + case ExtType: + return "ld #type" + case ExtPayloadOffset: + return "ld #poff" + case ExtInterfaceIndex: + return "ld #ifidx" + case ExtNetlinkAttr: + return "ld #nla" + case ExtNetlinkAttrNested: + return "ld #nlan" + case ExtMark: + return "ld #mark" + case ExtQueue: + return "ld #queue" + case ExtLinkLayerType: + return "ld #hatype" + case ExtRXHash: + return "ld #rxhash" + case ExtCPUID: + return "ld #cpu" + case ExtVLANTag: + return "ld #vlan_tci" + case ExtVLANTagPresent: + return "ld #vlan_avail" + case ExtVLANProto: + return "ld #vlan_tpid" + case ExtRand: + return "ld #rand" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// StoreScratch stores register Src into scratch[N]. +type StoreScratch struct { + Src Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a StoreScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + var op uint16 + switch a.Src { + case RegA: + op = opClsStoreA + case RegX: + op = opClsStoreX + default: + return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) + } + + return RawInstruction{ + Op: op, + K: uint32(a.N), + }, nil +} + +// String returns the instruction in assembler notation. +func (a StoreScratch) String() string { + switch a.Src { + case RegA: + return fmt.Sprintf("st M[%d]", a.N) + case RegX: + return fmt.Sprintf("stx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpConstant executes A = A Val. +type ALUOpConstant struct { + Op ALUOp + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op), + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a ALUOpConstant) String() string { + switch a.Op { + case ALUOpAdd: + return fmt.Sprintf("add #%d", a.Val) + case ALUOpSub: + return fmt.Sprintf("sub #%d", a.Val) + case ALUOpMul: + return fmt.Sprintf("mul #%d", a.Val) + case ALUOpDiv: + return fmt.Sprintf("div #%d", a.Val) + case ALUOpMod: + return fmt.Sprintf("mod #%d", a.Val) + case ALUOpAnd: + return fmt.Sprintf("and #%d", a.Val) + case ALUOpOr: + return fmt.Sprintf("or #%d", a.Val) + case ALUOpXor: + return fmt.Sprintf("xor #%d", a.Val) + case ALUOpShiftLeft: + return fmt.Sprintf("lsh #%d", a.Val) + case ALUOpShiftRight: + return fmt.Sprintf("rsh #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// ALUOpX executes A = A X +type ALUOpX struct { + Op ALUOp +} + +// Assemble implements the Instruction Assemble method. +func (a ALUOpX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(opOperandX) | uint16(a.Op), + }, nil +} + +// String returns the instruction in assembler notation. 
+func (a ALUOpX) String() string { + switch a.Op { + case ALUOpAdd: + return "add x" + case ALUOpSub: + return "sub x" + case ALUOpMul: + return "mul x" + case ALUOpDiv: + return "div x" + case ALUOpMod: + return "mod x" + case ALUOpAnd: + return "and x" + case ALUOpOr: + return "or x" + case ALUOpXor: + return "xor x" + case ALUOpShiftLeft: + return "lsh x" + case ALUOpShiftRight: + return "rsh x" + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// NegateA executes A = -A. +type NegateA struct{} + +// Assemble implements the Instruction Assemble method. +func (a NegateA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsALU | uint16(aluOpNeg), + }, nil +} + +// String returns the instruction in assembler notation. +func (a NegateA) String() string { + return fmt.Sprintf("neg") +} + +// Jump skips the following Skip instructions in the program. +type Jump struct { + Skip uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a Jump) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsJump | uint16(opJumpAlways), + K: a.Skip, + }, nil +} + +// String returns the instruction in assembler notation. +func (a Jump) String() string { + return fmt.Sprintf("ja %d", a.Skip) +} + +// JumpIf skips the following Skip instructions in the program if A +// Val is true. +type JumpIf struct { + Cond JumpTest + Val uint32 + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIf) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. +func (a JumpIf) String() string { + return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse) +} + +// JumpIfX skips the following Skip instructions in the program if A +// X is true. +type JumpIfX struct { + Cond JumpTest + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIfX) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. 
+func (a JumpIfX) String() string { + return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse) +} + +// jumpToRaw assembles a jump instruction into a RawInstruction +func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) { + var ( + cond jumpOp + flip bool + ) + switch test { + case JumpEqual: + cond = opJumpEqual + case JumpNotEqual: + cond, flip = opJumpEqual, true + case JumpGreaterThan: + cond = opJumpGT + case JumpLessThan: + cond, flip = opJumpGE, true + case JumpGreaterOrEqual: + cond = opJumpGE + case JumpLessOrEqual: + cond, flip = opJumpGT, true + case JumpBitsSet: + cond = opJumpSet + case JumpBitsNotSet: + cond, flip = opJumpSet, true + default: + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test) + } + jt, jf := skipTrue, skipFalse + if flip { + jt, jf = jf, jt + } + return RawInstruction{ + Op: opClsJump | uint16(cond) | uint16(operand), + Jt: jt, + Jf: jf, + K: k, + }, nil +} + +// jumpToString converts a jump instruction to assembler notation +func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string { + switch cond { + // K == A + case JumpEqual: + return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq %s,%d", operand, skipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt %s,%d", operand, skipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle %s,%d", operand, skipTrue) + // K & A != 0 + case JumpBitsSet: + if skipFalse > 0 { + return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse) + } + return fmt.Sprintf("jset %s,%d", operand, skipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue) + default: + return fmt.Sprintf("unknown JumpTest %#v", cond) + } +} + +func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string { + if skipTrue > 0 { + if skipFalse > 0 { + return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse) + } + return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue) + } + return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse) +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. 
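Illustrative sketch of the flip performed by jumpToRaw above: conditions with no native classic-BPF opcode are encoded using the inverse test with the branch offsets exchanged, so the kernel only ever sees jeq/jgt/jge/jset.

	// Assumes import "golang.org/x/net/bpf".
	ri, _ := bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 10, SkipTrue: 2}.Assemble()
	// JumpLessThan has no opcode of its own; it is emitted as BPF_JGE with the
	// jump targets swapped:
	//   ri == bpf.RawInstruction{Op: 0x35 /* JMP|JGE|K */, Jt: 0, Jf: 2, K: 10}
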
+func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TAX) String() string { + return fmt.Sprintf("tax") +} + +func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { + var ( + cls uint16 + sz uint16 + ) + switch dst { + case RegA: + cls = opClsLoadA + case RegX: + cls = opClsLoadX + default: + return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) + } + switch loadSize { + case 1: + sz = opLoadWidth1 + case 2: + sz = opLoadWidth2 + case 4: + sz = opLoadWidth4 + default: + return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) + } + return RawInstruction{ + Op: cls | sz | mode, + K: k, + }, nil +} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go new file mode 100644 index 00000000000..43e35f0ac24 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/setter.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Setter is a type which can attach a compiled BPF filter to itself. +type Setter interface { + SetBPF(filter []RawInstruction) error +} diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go new file mode 100644 index 00000000000..73f57f1f72e --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm.go @@ -0,0 +1,150 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import ( + "errors" + "fmt" +) + +// A VM is an emulated BPF virtual machine. +type VM struct { + filter []Instruction +} + +// NewVM returns a new VM using the input BPF program. 
+func NewVM(filter []Instruction) (*VM, error) { + if len(filter) == 0 { + return nil, errors.New("one or more Instructions must be specified") + } + + for i, ins := range filter { + check := len(filter) - (i + 1) + switch ins := ins.(type) { + // Check for out-of-bounds jumps in instructions + case Jump: + if check <= int(ins.Skip) { + return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) + } + case JumpIf: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + case JumpIfX: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } + // Check for division or modulus by zero + case ALUOpConstant: + if ins.Val != 0 { + break + } + + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return nil, errors.New("cannot divide by zero using ALUOpConstant") + } + // Check for unknown extensions + case LoadExtension: + switch ins.Num { + case ExtLen: + default: + return nil, fmt.Errorf("extension %d not implemented", ins.Num) + } + } + } + + // Make sure last instruction is a return instruction + switch filter[len(filter)-1].(type) { + case RetA, RetConstant: + default: + return nil, errors.New("BPF program must end with RetA or RetConstant") + } + + // Though our VM works using disassembled instructions, we + // attempt to assemble the input filter anyway to ensure it is compatible + // with an operating system VM. + _, err := Assemble(filter) + + return &VM{ + filter: filter, + }, err +} + +// Run runs the VM's BPF program against the input bytes. +// Run returns the number of bytes accepted by the BPF program, and any errors +// which occurred while processing the program. 
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case JumpIfX: + jump := jumpIfX(ins, regA, regX) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 00000000000..cf8947c3327 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
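Illustrative sketch of using the userspace emulator above: NewVM rejects malformed programs (jumps past the end, constant division by zero, no final Ret*), and Run applies the filter to a plain byte slice, returning 0 for rejected input, including input short enough to make a load fall out of bounds. `pkt` is a hypothetical Ethernet frame supplied by the caller.

	// Assumes imports "golang.org/x/net/bpf" and "log".
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},                           // A = EtherType
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, // not ARP?
		bpf.RetConstant{Val: 4096},                                   // ARP: accept
		bpf.RetConstant{Val: 0},                                      // else: drop
	})
	if err != nil {
		log.Fatal(err) // NewVM's validation errors surface here
	}
	n, _ := vm.Run(pkt)
	if n == 0 {
		// pkt was not ARP, or was shorter than 14 bytes.
	}
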
+ +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, regA uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val) +} + +func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX) +} + +func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int { + var ok bool + + switch cond { + case JumpEqual: + ok = regA == value + case JumpNotEqual: + ok = regA != value + case JumpGreaterThan: + ok = regA > value + case JumpLessThan: + ok = regA < value + case JumpGreaterOrEqual: + ok = regA >= value + case JumpLessOrEqual: + ok = regA <= value + case JumpBitsSet: + ok = (regA & value) != 0 + case JumpBitsNotSet: + ok = (regA & value) == 0 + } + + if ok { + return int(skipTrue) + } + + return int(skipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + // Size of LoadMemShift is always 1 byte + if !inBounds(len(in), offset, 1) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, 
regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index 06f84b85558..6b4027b33fd 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s index 8a7278319e3..8a06b87d715 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s index 6321421f272..f2397fde554 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s index 333242d5061..c9e6b6fc8b5 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc // +build arm,darwin #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s index 97e01743718..89843f8f4b2 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc // +build arm64,darwin #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s index 603dd5728c4..27674e1cafd 100644 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s index c9a0a260156..49f0ac2364c 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s index 35172477c86..f2dfc57b836 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s index 9227c875bfe..6d740db2c0c 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s index d9318cbf034..a8f5a29b35f 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 448bebbb59a..0655ecbfbbe 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index c6468a95880..bc3fb6ac3ed 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index cf0f3575c13..55b13c7ba45 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index afe6fdf6b11..22a83d8e3fa 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -4,7 +4,7 @@ // +build linux // +build arm64 -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index ab9d63831a7..dc222b90ce7 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -4,7 +4,7 @@ // +build linux // +build mips64 mips64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 99e5399045c..d333f13cff3 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -4,7 +4,7 @@ // +build linux // +build mips mipsle -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 88f71255781..459a629c273 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -4,7 +4,7 @@ // +build linux // +build ppc64 ppc64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 3cfefed2ec0..04d38497c6d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build riscv64,!gccgo +// +build riscv64,gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index a5a863c6bd7..cc303989e17 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -4,7 +4,7 @@ // +build s390x // +build linux -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s index 48bdcd7632a..ae7b498d506 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s index 2ede05c72f0..e57367c17aa 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s index e8928571c45..d7da175e1a3 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s index 6f98ba5a370..e7cbe1904c4 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s index 00576f3c835..2f00b0310f4 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s index 790ef77f86e..07632c99cea 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s index 469bfa10039..73e997320fc 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s index 0cedea3d39d..c47302aa46d 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 567a4763c88..47c93fcb6c7 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index ded8260f3e4..1f2c755a720 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 5e9269063f5..86781eac221 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// -// +build ppc64 s390x mips mips64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index bcdb5d30eb9..8822d8541f2 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 +// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 0c9a5c44bbe..c0f9f2d523f 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -225,6 +225,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -561,6 +562,7 @@ ccflags="$@" $2 ~ /^CRYPTO_/ || $2 ~ /^TIPC_/ || $2 ~ /^DEVLINK_/ || + $2 ~ /^LWTUNNEL_IP/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 842ab5acde8..a4f2944a24e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -105,6 +105,19 @@ func Pipe(p []int) (err error) { return } +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} + //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) func Pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 21a4946ba55..baa771f8ad9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build amd64,linux -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index c26e6ec2314..9edf3961b01 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,!gccgo +// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 070bd38994e..90e33d8cf75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
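Illustrative note on the syscall_dragonfly.go hunk earlier in this block (not part of the patch): the new Pipe2 wrapper follows the same shape as the equivalent helpers on other platforms, so portable callers can request close-on-exec pipe ends in one call. Hedged sketch; the flag shown is the package's usual O_CLOEXEC constant.

	// Assumes import "golang.org/x/sys/unix".
	p := make([]int, 2)
	if err := unix.Pipe2(p, unix.O_CLOEXEC); err != nil {
		return err
	}
	r, w := p[0], p[1] // read end, write end; both already close-on-exec
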
-// +build linux,!gccgo,386 +// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 8c514c95ed4..1a97baae732 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm,!gccgo,linux +// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 1c70d1b6902..87bd161cefc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !gccgo,!ppc64le,!ppc64 +// +build gc,!ppc64le,!ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index 86dc765aba3..d36216c3ca7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -4,7 +4,7 @@ // +build linux // +build ppc64le ppc64 -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 2069fb861d1..b46110354df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1217,6 +1217,12 @@ const ( LOOP_SET_STATUS_SETTABLE_FLAGS = 0xc LO_KEY_SIZE = 0x20 LO_NAME_SIZE = 0x40 + LWTUNNEL_IP6_MAX = 0x8 + LWTUNNEL_IP_MAX = 0x8 + LWTUNNEL_IP_OPTS_MAX = 0x3 + LWTUNNEL_IP_OPT_ERSPAN_MAX = 0x4 + LWTUNNEL_IP_OPT_GENEVE_MAX = 0x3 + LWTUNNEL_IP_OPT_VXLAN_MAX = 0x1 MADV_COLD = 0x14 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 4b3a8ad7bec..0550da06d14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build aix,ppc64 -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index aebfe511ad5..1aaccd3615e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -362,6 +362,16 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func extpread(fd int, p []byte, flags int, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index a96ad4c299d..504ef131fb8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2981,3 +2981,21 @@ type PPSKTime struct { Nsec int32 Flags uint32 } + +const ( + LWTUNNEL_ENCAP_NONE = 0x0 + LWTUNNEL_ENCAP_MPLS = 0x1 + LWTUNNEL_ENCAP_IP = 0x2 + LWTUNNEL_ENCAP_ILA = 0x3 + LWTUNNEL_ENCAP_IP6 = 0x4 + LWTUNNEL_ENCAP_SEG6 = 0x5 + LWTUNNEL_ENCAP_BPF = 0x6 + LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 + LWTUNNEL_ENCAP_RPL = 0x8 + LWTUNNEL_ENCAP_MAX = 0x8 + + MPLS_IPTUNNEL_UNSPEC = 0x0 + MPLS_IPTUNNEL_DST = 0x1 + MPLS_IPTUNNEL_TTL = 0x2 + MPLS_IPTUNNEL_MAX = 0x2 +) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 82076fb74ff..9cd147b7e3f 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -32,6 +32,8 @@ type DLLError struct { func (e *DLLError) Error() string { return e.Msg } +func (e *DLLError) Unwrap() error { return e.Err } + // A DLL implements access to a single DLL. 
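Illustrative note on the dll_windows.go hunk above (not part of the patch): with Unwrap defined, errors.As and errors.Is can reach the error stored in DLLError.Err. Sketch under the assumption that the failure came from a LoadDLL-style call whose underlying error is a syscall.Errno:

	// Assumes imports "errors", "syscall", "golang.org/x/sys/windows".
	_, err := windows.LoadDLL("no-such-library.dll")
	var errno syscall.Errno
	if errors.As(err, &errno) {
		// errno is the Windows error code (e.g. ERROR_MOD_NOT_FOUND) that was
		// previously reachable only by inspecting the DLLError.Err field directly.
	}
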
type DLL struct { Name string diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 9e3c44a8557..14906485f3a 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1103,9 +1103,10 @@ type OBJECTS_AND_NAME struct { } //sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo -//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetSecurityInfo //sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW //sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW +//sys SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) = advapi32.SetKernelObjectSecurity //sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW //sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go new file mode 100644 index 00000000000..1681810e048 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go @@ -0,0 +1,100 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go
new file mode 100644
index 00000000000..1681810e048
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go
@@ -0,0 +1,100 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import "syscall"
+
+const (
+	ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0
+	ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1
+	ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2
+	ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3
+	ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100
+	ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101
+	ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102
+	ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103
+	ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200
+	ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201
+	ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202
+	ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203
+	ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204
+	ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205
+	ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206
+	ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207
+	ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208
+	ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209
+	ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A
+	ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B
+	ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C
+	ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D
+	ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E
+	ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F
+	ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210
+	ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211
+	ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212
+	ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213
+	ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214
+	ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215
+	ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216
+	ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217
+	ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218
+	ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219
+	ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A
+	ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B
+	ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C
+	ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D
+	ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E
+	ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F
+	ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220
+	ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221
+	ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222
+	ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223
+	ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224
+	ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225
+	ERROR_DI_POSTPROCESSING_REQUIRED syscall.Errno = 0x20000000 | 0xC0000000 | 0x226
+	ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227
+	ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228
+	ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229
+	ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A
+	ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B
+	ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C
+	ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D
+	ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E
+	ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F
+	ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230
+	ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231
+	ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232
+	ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233
+	ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234
+	ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235
+	ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236
+	ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238
+	ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239
+	ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A
+	ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B
+	ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C
+	ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D
+	ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E
+	ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F
+	ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240
+	ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241
+	ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242
+	ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243
+	ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244
+	ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245
+	ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246
+	ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247
+	ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248
+	ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249
+	ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A
+	ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B
+	ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C
+	ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300
+	EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW
+	ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE
+	ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE
+	ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED
+	ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE
+)
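Each of these SetupAPI values is an ordinary syscall.Errno, so an error returned by a SetupAPI-backed call can be matched against them directly. A small sketch (illustrative only; isNoSuchDevinst is a hypothetical helper and assumes imports of "errors" and "golang.org/x/sys/windows"):

    // isNoSuchDevinst reports whether err is the "no such device instance"
    // SetupAPI errno defined above; errors.Is falls back to == for Errno values.
    func isNoSuchDevinst(err error) bool {
    	return errors.Is(err, windows.ERROR_NO_SUCH_DEVINST)
    }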
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 598e8ce5852..86a46f7713a 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -174,6 +174,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW
 //sys ExitProcess(exitcode uint32)
 //sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process
+//sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2?
 //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
 //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
 //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error)
@@ -187,6 +188,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys FindClose(handle Handle) (err error)
 //sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error)
 //sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error)
+//sys SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error)
 //sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW
 //sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW
 //sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW
@@ -243,6 +245,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW
 //sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW
 //sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW
+//sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW
 //sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW
 //sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error)
 //sys UnmapViewOfFile(addr uintptr) (err error)
@@ -275,7 +278,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
 //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
 //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
-//sys SetConsoleCursorPosition(console Handle, position Coord) (err error) = kernel32.SetConsoleCursorPosition
+//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
 //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
 //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
 //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
@@ -351,7 +354,6 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages
 //sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages
 //sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages
-//sys GetFinalPathNameByHandleW(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW

 // Process Status API (PSAPI)
 //sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses
@@ -1480,3 +1482,7 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf
 		return languages, nil
 	}
 }
+
+func SetConsoleCursorPosition(console Handle, position Coord) error {
+	return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position))))
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index da1652e74b0..e7ae37f8848 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1772,3 +1772,32 @@ const (
 	MUI_LANGUAGE_INSTALLED = 0x20
 	MUI_LANGUAGE_LICENSED = 0x40
 )
+
+// FILE_INFO_BY_HANDLE_CLASS constants for SetFileInformationByHandle/GetFileInformationByHandleEx
+const (
+	FileBasicInfo = 0
+	FileStandardInfo = 1
+	FileNameInfo = 2
+	FileRenameInfo = 3
+	FileDispositionInfo = 4
+	FileAllocationInfo = 5
+	FileEndOfFileInfo = 6
+	FileStreamInfo = 7
+	FileCompressionInfo = 8
+	FileAttributeTagInfo = 9
+	FileIdBothDirectoryInfo = 10
+	FileIdBothDirectoryRestartInfo = 11
+	FileIoPriorityHintInfo = 12
+	FileRemoteProtocolInfo = 13
+	FileFullDirectoryInfo = 14
+	FileFullDirectoryRestartInfo = 15
+	FileStorageInfo = 16
+	FileAlignmentInfo = 17
+	FileIdInfo = 18
+	FileIdExtdDirectoryInfo = 19
+	FileIdExtdDirectoryRestartInfo = 20
+	FileDispositionInfoEx = 21
+	FileRenameInfoEx = 22
+	FileCaseSensitiveInfo = 23
+	FileNormalizedNameInfo = 24
+)
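The final-path helper is now exported as GetFinalPathNameByHandle and takes a windows.Handle, and SetFileInformationByHandle plus the FILE_INFO_BY_HANDLE_CLASS constants above become usable from this package. A hedged sketch of resolving a handle to its path (illustrative only; finalPath is hypothetical, assumes an import of "golang.org/x/sys/windows", and flags 0 requests a normalized DOS path):

    // finalPath resolves an open handle to its normalized path.
    func finalPath(h windows.Handle) (string, error) {
    	// 512 UTF-16 units covers typical paths; a robust caller retries with a
    	// larger buffer when the returned length does not fit.
    	buf := make([]uint16, 512)
    	n, err := windows.GetFinalPathNameByHandle(h, &buf[0], uint32(len(buf)), 0)
    	if err != nil {
    		return "", err
    	}
    	if int(n) > len(buf) {
    		n = uint32(len(buf))
    	}
    	return windows.UTF16ToString(buf[:n]), nil
    }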
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 5d0a54e690c..8fbef7da669 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -122,6 +122,7 @@ var (
 	procReportEventW = modadvapi32.NewProc("ReportEventW")
 	procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
 	procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
+	procSetKernelObjectSecurity = modadvapi32.NewProc("SetKernelObjectSecurity")
 	procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW")
 	procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl")
 	procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl")
@@ -248,6 +249,7 @@ var (
 	procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW")
 	procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW")
 	procIsWow64Process = modkernel32.NewProc("IsWow64Process")
+	procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2")
 	procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW")
 	procLoadLibraryW = modkernel32.NewProc("LoadLibraryW")
 	procLocalFree = modkernel32.NewProc("LocalFree")
@@ -283,6 +285,7 @@ var (
 	procSetEvent = modkernel32.NewProc("SetEvent")
 	procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW")
 	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
 	procSetFilePointer = modkernel32.NewProc("SetFilePointer")
 	procSetFileTime = modkernel32.NewProc("SetFileTime")
 	procSetHandleInformation = modkernel32.NewProc("SetHandleInformation")
@@ -970,6 +973,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
 	return
 }

+func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
 	var _p0 *uint16
 	_p0, ret = syscall.UTF16PtrFromString(objectName)
@@ -1056,8 +1067,11 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
 	return
 }

-func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) {
-	syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
+	r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
 	return
 }

@@ -1727,7 +1741,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) {
 	return
 }

-func GetFinalPathNameByHandleW(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
+func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
 	r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
 	n = uint32(r0)
 	if n == 0 {
@@ -2055,6 +2069,18 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
 	return
 }

+func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) {
+	err = procIsWow64Process2.Find()
+	if err != nil {
+		return
+	}
+	r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) {
 	var _p0 *uint16
 	_p0, err = syscall.UTF16PtrFromString(libname)
@@ -2316,8 +2342,8 @@ func ResumeThread(thread Handle) (ret uint32, err error) {
 	return
 }

-func SetConsoleCursorPosition(console Handle, position Coord) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(*((*uint32)(unsafe.Pointer(&position)))), 0)
+func setConsoleCursorPosition(console Handle, position uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -2386,6 +2412,14 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error)
 	return
 }

+func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) {
+	r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
 	r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
 	newlowoffset = uint32(r0)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 96e32ef815f..940300a1e18 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -72,7 +72,10 @@ github.com/vishvananda/netns
 # github.com/willf/bitset v1.1.11
 ## explicit
 github.com/willf/bitset
-# golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf
+# golang.org/x/net v0.0.0-20201224014010-6772e930b67b
+## explicit
+golang.org/x/net/bpf
+# golang.org/x/sys v0.0.0-20201119102817-f84b799fce68
 ## explicit
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix