From 5fb6511cc61b03304dd83c204a97334b0bf0967c Mon Sep 17 00:00:00 2001
From: Tony Arcieri
Date: Tue, 4 Jun 2019 14:30:14 -0700
Subject: [PATCH] zeroize: Remove CPU fences

Per @RalfJung:
https://github.com/iqlusioninc/crates/pull/214#discussion_r290488758

> AFAIK atomic fences only really prevent reordering of atomic accesses.
> Non-atomic accesses can still be reordered. The interaction of this
> with volatile accesses is entirely unclear -- technically, they are
> non-atomic, but in practice it seems unlikely that the compiler would
> perform such reorderings.
> [...]
> A compiler fence seems like a reasonable precaution and also helps
> make sure the side-effects happens near where the programmer might
> expect it, but a CPU fence to me sounds more like cargo cult.
---
 zeroize/src/lib.rs | 28 ++++++++++------------------
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/zeroize/src/lib.rs b/zeroize/src/lib.rs
index d0abcbad..f9860855 100644
--- a/zeroize/src/lib.rs
+++ b/zeroize/src/lib.rs
@@ -124,19 +124,15 @@
 //! [these remarks have been removed] and the specific usage pattern in this
 //! crate is considered to be well-defined.
 //!
-//! To help mitigate concerns about reordering of operations executed by the
-//! CPU potentially exposing values after they have been zeroed, this crate
-//! leverages the [core::sync::atomic] memory fence functions including
-//! [compiler_fence] and [fence] (which uses the CPU's native fence
-//! instructions). These fences are leveraged with the strictest ordering
-//! guarantees, [Ordering::SeqCst], which ensures no accesses are reordered.
+//! Additionally this crate leverages [compiler_fence] from
+//! [core::sync::atomic] with the strictest ordering ([Ordering::SeqCst])
+//! as a precaution to help ensure reads are not reordered before memory has
+//! been zeroed.
 //!
 //! All of that said, there is still potential for microarchitectural attacks
-//! (ala Spectre/Meltdown) to leak "zeroized" secrets through covert channels
-//! (e.g. the memory fences mentioned above have previously been used as a
-//! covert channel in the Foreshadow attack). This crate makes no guarantees
-//! that zeroized values cannot be leaked through such channels, as they
-//! represent flaws in the underlying hardware.
+//! (ala Spectre/Meltdown) to leak "zeroized" secrets through covert channels.
+//! This crate makes no guarantees that zeroized values cannot be leaked
+//! through such channels, as they represent flaws in the underlying hardware.
 //!
 //! ## Stack/Heap Zeroing Notes
 //!
@@ -196,7 +192,6 @@
 //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html
 //! [Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst
 //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html
-//! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html
 //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model
 //! [unordered]: https://llvm.org/docs/Atomics.html#unordered
 //! [llvm-atomic]: https://github.com/rust-lang/rust/issues/58599
@@ -305,11 +300,9 @@ where
 /// Implement `Zeroize` on slices of types that can be zeroized with `Default`.
 ///
-/// This impl can eventually be optimized using an atomic memset intrinsic,
-/// such as `llvm.memset.element.unordered.atomic`. For that reason the blanket
-/// impl on slices is bounded by `DefaultIsZeroes`. See:
-///
-///
+/// This impl can eventually be optimized using a memset intrinsic,
+/// such as `core::intrinsics::volatile_set_memory`. For that reason the blanket
+/// impl on slices is bounded by `DefaultIsZeroes`.
 ///
 /// To zeroize a mut slice of `Z: Zeroize` which does not impl
 /// `DefaultIsZeroes`, call `iter_mut().zeroize()`.
@@ -404,7 +397,6 @@ where
 /// see zeroes after this point.
 #[inline]
 fn atomic_fence() {
-    atomic::fence(atomic::Ordering::SeqCst);
     atomic::compiler_fence(atomic::Ordering::SeqCst);
 }
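
Note (not part of the patch): below is a minimal sketch of the pattern this change leaves in place -- volatile writes to wipe memory, followed by a compiler-only fence rather than a CPU fence. The `zero_bytes` helper is hypothetical and only illustrates the technique; it is not the crate's actual implementation.

use core::{ptr, sync::atomic};

/// Hypothetical helper: zero a byte slice with volatile stores, then emit
/// a compiler fence so the zeroing is not reordered past later reads.
fn zero_bytes(buf: &mut [u8]) {
    for byte in buf.iter_mut() {
        // Volatile store: the compiler must treat it as observable and
        // may not optimize it away as a dead store.
        unsafe { ptr::write_volatile(byte as *mut u8, 0) };
    }
    // Compiler-only barrier with the strictest ordering; this is what the
    // patch keeps. The CPU fence (atomic::fence) is what it removes.
    atomic::compiler_fence(atomic::Ordering::SeqCst);
}

fn main() {
    let mut secret = *b"hunter2";
    zero_bytes(&mut secret);
    assert_eq!(secret, [0u8; 7]);
}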