treewide: Cleanup, additional CI checks (#8)
woodruffw authored Nov 2, 2020
1 parent d51430f commit afcfdb7
Showing 33 changed files with 595 additions and 562 deletions.
14 changes: 10 additions & 4 deletions .github/workflows/nightly.yml
@@ -18,14 +18,20 @@ jobs:
    steps:
      - uses: actions/checkout@master

-     - name: Set nightly toolchain
+     - name: Set toolchain
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          override: true
+         components: rustfmt, clippy

+     - name: Format
+       run: cargo fmt

+     - name: Lint
+       run: cargo clippy -- -D warnings

      - name: Build
-       run: RUSTFLAGS="-C target-cpu=native" cargo build
+       run: cargo build

      - name: Test
-       run: RUSTFLAGS="-C target-cpu=native" cargo test
+       run: cargo test
1 change: 1 addition & 0 deletions Cargo.toml
@@ -15,6 +15,7 @@ blake3 = "0.3.4"
crossbeam = "0.7.3"
fnv = "1.0.7"
hex = "0.4"
+itertools = "0.9"
rand = "0.7.3"
rand_chacha = "0.2.2"
rand_core = "*"
14 changes: 12 additions & 2 deletions README.md
@@ -4,9 +4,19 @@

Reverie is an implementation (prover and verifier) of the MPC-in-the-head NIZKPoK outlined in
[Improved Non-Interactive Zero Knowledge with Applications to Post-Quantum Signatures](https://eprint.iacr.org/2018/475).
-Reverie seeks to offer concrete prover efficiency (linear proving time with small constants) for complex predicates.
-The implementation seeks to offer 128-bits of (classical) security and support arbitrary rings, most efficiently Z_2, Z_8, Z_16, Z_32 and Z_64.
+Reverie seeks to offer concrete prover efficiency (linear proving time with small constants) for
+complex predicates. The implementation seeks to offer 128-bits of (classical) security and support
+arbitrary rings, most efficiently
+Z<sub>2</sub>, Z<sub>8</sub>, Z<sub>16</sub>, Z<sub>32</sub> and Z<sub>64</sub>.
+
+Reverie provides both a library (with a simplified and a streaming interface),
+in addition to a "companion" program for proving/verifying statements specified in Bristol format
+to enable easy experimentation.
+
+## Building
+
+Reverie requires a relatively recent `nightly` Rust.
+
+```bash
+$ cargo build
+```
1 change: 1 addition & 0 deletions rust-toolchain
@@ -0,0 +1 @@
+nightly
20 changes: 12 additions & 8 deletions src/algebra/gf2/batch.rs
@@ -2,6 +2,7 @@ use super::*;

use crate::util::Writer;

+use itertools::izip;
use serde::{Deserialize, Serialize};

use std::fmt;
@@ -51,12 +52,13 @@ impl fmt::Debug for BitBatch {
impl Add for BitBatch {
    type Output = Self;

+   #[allow(clippy::suspicious_arithmetic_impl)]
    #[inline(always)]
    fn add(self, other: Self) -> Self::Output {
        // LLVM optimizes this into a single XOR between 64-bit integers
        let mut res: [u8; BATCH_SIZE_BYTES] = [0; BATCH_SIZE_BYTES];
-       for i in 0..BATCH_SIZE_BYTES {
-           res[i] = self.0[i] ^ other.0[i];
+       for (res_byte, self_byte, other_byte) in izip!(&mut res, &self.0, &other.0) {
+           *res_byte = self_byte ^ other_byte;
        }
        Self(res)
    }
@@ -65,6 +67,7 @@ impl Add for BitBatch {
impl Sub for BitBatch {
    type Output = Self;

+   #[allow(clippy::suspicious_arithmetic_impl)]
    #[inline(always)]
    fn sub(self, other: Self) -> Self::Output {
        self + other
@@ -74,12 +77,13 @@ impl Sub for BitBatch {
impl Mul for BitBatch {
    type Output = Self;

+   #[allow(clippy::suspicious_arithmetic_impl)]
    #[inline(always)]
    fn mul(self, other: Self) -> Self::Output {
-       // LLVM optimizes this into a single XOR between 64-bit integers
+       // LLVM optimizes this into a single AND between 64-bit integers
        let mut res: [u8; BATCH_SIZE_BYTES] = [0; BATCH_SIZE_BYTES];
-       for i in 0..BATCH_SIZE_BYTES {
-           res[i] = self.0[i] & other.0[i];
+       for (res_byte, self_byte, other_byte) in izip!(&mut res, &self.0, &other.0) {
+           *res_byte = self_byte & other_byte;
        }
        Self(res)
    }
@@ -94,10 +98,10 @@ impl RingModule<BitScalar> for BitBatch {
    const DIMENSION: usize = BATCH_SIZE_BITS;

    #[inline(always)]
-   fn action(&self, s: BitScalar) -> Self {
+   fn action(&self, scalar: BitScalar) -> Self {
        let mut res: [u8; BATCH_SIZE_BYTES] = [0; BATCH_SIZE_BYTES];
-       for i in 0..BATCH_SIZE_BYTES {
-           res[i] = s.0 * self.0[i];
+       for (res_byte, self_byte) in res.iter_mut().zip(&self.0) {
+           *res_byte = scalar.0 * self_byte;
        }
        Self(res)
    }
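The indexed loops above become `itertools::izip!` (hence the new dependency in `Cargo.toml`), which walks several slices in lockstep without manual index arithmetic. A minimal sketch of the pattern, assuming itertools 0.9:

```rust
use itertools::izip;

// XOR two byte arrays element-wise, mirroring the rewritten `Add` impl.
fn xor_bytes(a: &[u8; 8], b: &[u8; 8]) -> [u8; 8] {
    let mut res = [0u8; 8];
    for (res_byte, a_byte, b_byte) in izip!(&mut res, a, b) {
        *res_byte = a_byte ^ b_byte;
    }
    res
}

fn main() {
    assert_eq!(xor_bytes(&[0xff; 8], &[0x0f; 8]), [0xf0; 8]);
}
```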
30 changes: 17 additions & 13 deletions src/algebra/gf2/domain64.rs
@@ -174,6 +174,9 @@ impl GF2P64 {
        dst: &mut [<Self as Domain>::Batch],
        src: &[<Self as Domain>::Sharing],
    ) {
+       // NOTE(ww): This is safe, since we fully initialize sharings immediately below.
+       // We could probably avoid this with an impl `From<Domain::Sharing> for ...`
+       #[allow(clippy::uninit_assumed_init)]
        let mut sharings: [[u8; 8]; <Self as Domain>::Batch::DIMENSION] =
            MaybeUninit::uninit().assume_init();

@@ -315,6 +318,9 @@ impl GF2P64 {
        dst: &mut [<Self as Domain>::Batch],
        src: &[<Self as Domain>::Sharing],
    ) {
+       // NOTE(ww): This is safe, since we fully initialize sharings immediately below.
+       // We could probably avoid this with an impl `From<Domain::Sharing> for ...`
+       #[allow(clippy::uninit_assumed_init)]
        let mut sharings: [[u8; 8]; <Self as Domain>::Batch::DIMENSION] =
            MaybeUninit::uninit().assume_init();

@@ -354,7 +360,7 @@ impl GF2P64 {
        // transpose two batches at a time, byte-by-byte
        for i in (0..(<Self as Domain>::Sharing::DIMENSION / 8)).step_by(2) {
            // pack 2 bytes from 64 different players
-           let mut v: [__m128i; 8] = [
+           let mut vecs: [__m128i; 8] = [
                pack8x2!(0x00, i),
                pack8x2!(0x08, i),
                pack8x2!(0x10, i),
@@ -370,15 +376,15 @@

            for _ in 0..8 {
                let masks: [_; 8] = [
-                   _mm_movemask_epi8(v[0]),
-                   _mm_movemask_epi8(v[1]),
-                   _mm_movemask_epi8(v[2]),
-                   _mm_movemask_epi8(v[3]),
+                   _mm_movemask_epi8(vecs[0]),
+                   _mm_movemask_epi8(vecs[1]),
+                   _mm_movemask_epi8(vecs[2]),
+                   _mm_movemask_epi8(vecs[3]),
                    //
-                   _mm_movemask_epi8(v[4]),
-                   _mm_movemask_epi8(v[5]),
-                   _mm_movemask_epi8(v[6]),
-                   _mm_movemask_epi8(v[7]),
+                   _mm_movemask_epi8(vecs[4]),
+                   _mm_movemask_epi8(vecs[5]),
+                   _mm_movemask_epi8(vecs[6]),
+                   _mm_movemask_epi8(vecs[7]),
                ];

                dst[idx] = BitBatch([
@@ -403,8 +409,8 @@ impl GF2P64 {
                    masks[7] as u8,
                ]);

-               for i in 0..8 {
-                   v[i] = _mm_add_epi8(v[i], v[i]);
+               for vec in &mut vecs {
+                   *vec = _mm_add_epi8(*vec, *vec);
                }

                idx += 1;
@@ -422,7 +428,6 @@ impl Domain for GF2P64 {
    const PREPROCESSING_REPETITIONS: usize = 631;
    const ONLINE_REPETITIONS: usize = 23;

-   #[inline(always)]
    fn convert(dst: &mut [Self::Sharing], src: &[Self::Batch]) {
        // do a single bounds check up front
        assert_eq!(src.len(), Self::PLAYERS);
@@ -440,7 +445,6 @@
        compile_error!("unsupported platform: requires x86{-64} with SSE2 or AVX2");
    }

-   #[inline(always)]
    fn convert_inv(dst: &mut [Self::Batch], src: &[Self::Sharing]) {
        assert_eq!(src.len(), Self::Batch::DIMENSION);
        assert_eq!(dst.len(), Self::Sharing::DIMENSION);
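The `NOTE(ww)` comments above silence `clippy::uninit_assumed_init` rather than remove the `MaybeUninit::uninit().assume_init()` on the `sharings` array. A sketch of one safe alternative (an assumption, not what this commit does) is the element-wise `MaybeUninit` pattern from the standard library docs:

```rust
use std::mem::{transmute, MaybeUninit};

fn build_sharings() -> [[u8; 8]; 64] {
    // An array of `MaybeUninit` may legitimately start uninitialized.
    let mut sharings: [MaybeUninit<[u8; 8]>; 64] =
        unsafe { MaybeUninit::uninit().assume_init() };
    for (i, slot) in sharings.iter_mut().enumerate() {
        *slot = MaybeUninit::new([i as u8; 8]); // fully initialize every slot
    }
    // Every element is now initialized, so the cast is sound.
    unsafe { transmute::<_, [[u8; 8]; 64]>(sharings) }
}
```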
56 changes: 26 additions & 30 deletions src/algebra/gf2/domain8.rs
@@ -5,7 +5,6 @@ pub struct GF2P8 {}

impl GF2P8 {
// This codes assumes that a bounds check has been done prior to the call.
#[inline(always)]
#[cfg(any(all(not(target_feature = "avx2"), not(target_feature = "sse2")), test))]
fn convert_generic(dst: &mut [BitSharing8], src: &[BitBatch]) {
let mut idx = 0;
@@ -53,7 +52,7 @@ impl GF2P8 {
        // transpose four batches at a time, byte-by-byte
        for i in (0..BATCH_SIZE_BYTES).step_by(4) {
            // pack 4 bytes from 8 different shares
-           let mut v = _mm256_set_epi8(
+           let mut vecs = _mm256_set_epi8(
                src.get_unchecked(0).0[i] as i8,
                src.get_unchecked(1).0[i] as i8,
                src.get_unchecked(2).0[i] as i8,
@@ -91,20 +90,20 @@
            // calculate the 8 sharings
            let mut idx = i * 8;
            for _ in 0..8 {
-               let mask = _mm256_movemask_epi8(v);
+               let mask = _mm256_movemask_epi8(vecs);
                dst[idx] = BitSharing8((mask >> 24) as u8);
                dst[idx + 8] = BitSharing8((mask >> 16) as u8);
                dst[idx + 16] = BitSharing8((mask >> 8) as u8);
                dst[idx + 24] = BitSharing8(mask as u8);
-               v = _mm256_add_epi8(v, v);
+               vecs = _mm256_add_epi8(vecs, vecs);
                idx += 1;
            }

            // assert all bits consumed
            debug_assert_eq!(
                {
-                   let v = _mm256_add_epi8(v, v);
-                   _mm256_movemask_epi8(v)
+                   let vecs = _mm256_add_epi8(vecs, vecs);
+                   _mm256_movemask_epi8(vecs)
                },
                0
            )
@@ -123,7 +122,7 @@ impl GF2P8 {
        // transpose four batches at a time, byte-by-byte
        for i in (0..BATCH_SIZE_BYTES).step_by(2) {
            // pack 2 bytes from 8 different shares
-           let mut v = _mm_set_epi8(
+           let mut vecs = _mm_set_epi8(
                src.get_unchecked(0).0[i] as i8,
                src.get_unchecked(1).0[i] as i8,
                src.get_unchecked(2).0[i] as i8,
@@ -145,18 +144,18 @@
            // calculate the 8 sharings
            let mut idx = i * 8;
            for _ in 0..8 {
-               let mask = _mm_movemask_epi8(v);
+               let mask = _mm_movemask_epi8(vecs);
                dst[idx] = BitSharing8((mask >> 8) as u8);
                dst[idx + 8] = BitSharing8(mask as u8);
-               v = _mm_add_epi8(v, v);
+               vecs = _mm_add_epi8(vecs, vecs);
                idx += 1;
            }

            // assert all bits consumed
            debug_assert_eq!(
                {
-                   let v = _mm_add_epi8(v, v);
-                   _mm_movemask_epi8(v)
+                   let vecs = _mm_add_epi8(vecs, vecs);
+                   _mm_movemask_epi8(vecs)
                },
                0
            )
@@ -173,7 +172,7 @@ impl GF2P8 {
        use core::arch::x86_64::*;

        // use 2 x 256-bit registers
-       let mut v: [__m256i; 2] = [
+       let mut vecs: [__m256i; 2] = [
            _mm256_set_epi8(
                src[0x00].0 as i8,
                src[0x01].0 as i8,
@@ -244,15 +243,15 @@ impl GF2P8 {
            ),
        ];

-       for p in 0..<Self as Domain>::Sharing::DIMENSION {
+       for d in dst.iter_mut().take(<Self as Domain>::Sharing::DIMENSION) {
            for i in 0..2 {
                let base = i * 4;
-               let mask = _mm256_movemask_epi8(v[i]);
-               (dst[p].0)[base] = (mask >> 24) as u8;
-               (dst[p].0)[base + 1] = (mask >> 16) as u8;
-               (dst[p].0)[base + 2] = (mask >> 8) as u8;
-               (dst[p].0)[base + 3] = mask as u8;
-               v[i] = _mm256_add_epi8(v[i], v[i]);
+               let mask = _mm256_movemask_epi8(vecs[i]);
+               (d.0)[base] = (mask >> 24) as u8;
+               (d.0)[base + 1] = (mask >> 16) as u8;
+               (d.0)[base + 2] = (mask >> 8) as u8;
+               (d.0)[base + 3] = mask as u8;
+               vecs[i] = _mm256_add_epi8(vecs[i], vecs[i]);
            }
        }
    }
@@ -267,7 +266,7 @@ impl GF2P8 {
        use core::arch::x86_64::*;

        // use 4 x 128-bit registers
-       let mut v: [__m128i; 4] = [
+       let mut vecs: [__m128i; 4] = [
            _mm_set_epi8(
                src[0x00].0 as i8,
                src[0x01].0 as i8,
@@ -342,19 +341,18 @@
            ),
        ];

-       for p in 0..<Self as Domain>::Sharing::DIMENSION {
-           for i in 0..4 {
+       for d in dst.iter_mut().take(<Self as Domain>::Sharing::DIMENSION) {
+           for (i, vec) in vecs.iter_mut().enumerate() {
                let base = i * 2;
-               let mask = _mm_movemask_epi8(v[i]);
-               (dst[p].0)[base] = (mask >> 8) as u8;
-               (dst[p].0)[base + 1] = mask as u8;
-               v[i] = _mm_add_epi8(v[i], v[i]);
+               let mask = _mm_movemask_epi8(*vec);
+               (d.0)[base] = (mask >> 8) as u8;
+               (d.0)[base + 1] = mask as u8;
+               *vec = _mm_add_epi8(*vec, *vec);
            }
        }
    }

    // This code assumes that a bounds check has been done prior to the call.
-   #[inline(always)]
    #[cfg(any(all(not(target_feature = "avx2"), not(target_feature = "sse2")), test))]
    fn convert_inv_generic(dst: &mut [BitBatch], src: &[BitSharing8]) {
        for i in 0..BATCH_SIZE_BYTES {
@@ -393,7 +391,6 @@ impl Domain for GF2P8 {
    const PREPROCESSING_REPETITIONS: usize = 252;
    const ONLINE_REPETITIONS: usize = 44;

-   #[inline(always)]
    fn convert(dst: &mut [Self::Sharing], src: &[Self::Batch]) {
        // do a single bounds check up front
        assert_eq!(src.len(), Self::PLAYERS);
@@ -420,7 +417,6 @@

    // converts 64 sharings between 8 players to 8 batches of 64 sharings:
    // one batch per player.
-   #[inline(always)]
    fn convert_inv(dst: &mut [Self::Batch], src: &[Self::Sharing]) {
        // there should be enough sharings to fill a batch
        assert_eq!(src.len(), Self::Batch::DIMENSION);
@@ -491,8 +487,8 @@ mod test {
mod benchmark {
    use super::*;

-   use rand::thread_rng;
    use ::test::{black_box, Bencher};
+   use rand::thread_rng;

    #[bench]
    fn bench_gf2p8_convert(b: &mut Bencher) {
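All of the `v` → `vecs` renames above sit on the same bit-transpose trick: `_mm_movemask_epi8` collects the most significant bit of every byte lane into an integer mask, and `_mm_add_epi8(v, v)` shifts each byte left by one so the next bit becomes the MSB. A self-contained sketch of that core loop (illustrative only; x86-64 with baseline SSE2 assumed, not code from this commit):

```rust
#[cfg(target_arch = "x86_64")]
fn transpose_16x8(bytes: [u8; 16]) -> [u16; 8] {
    use core::arch::x86_64::*;

    let mut rows = [0u16; 8];
    unsafe {
        let mut v = _mm_loadu_si128(bytes.as_ptr() as *const __m128i);
        for row in &mut rows {
            // Gather the MSB of each of the 16 byte lanes into one 16-bit mask.
            *row = _mm_movemask_epi8(v) as u16;
            // Shift every byte left by one; the next bit becomes the MSB.
            v = _mm_add_epi8(v, v);
        }
    }
    rows // rows[k] holds bit (7 - k) of all 16 input bytes
}
```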
2 changes: 1 addition & 1 deletion src/algebra/gf2/scalar.rs
@@ -30,7 +30,7 @@ impl Packable for BitScalar {
        }
        if rem != 0 {
            debug_assert!(rem < 8);
-           pac = pac << (8 - rem);
+           pac <<= 8 - rem;
            dst.write_all(&[pac])
        } else {
            Ok(())
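The `pac <<= 8 - rem` cleanup is the tail of a bit-packing routine: once a partial byte has collected `rem < 8` bits in its low positions, they are shifted up so the final byte is left-aligned like the full ones. A standalone sketch of that layout (inferred from the visible fragment; the full `pack` body is not shown in this diff):

```rust
// Pack single-bit scalars (0 or 1) MSB-first into whole bytes.
fn pack_bits(bits: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    let (mut pac, mut rem) = (0u8, 0u32);
    for &bit in bits {
        pac = (pac << 1) | (bit & 1);
        rem += 1;
        if rem == 8 {
            out.push(pac);
            pac = 0;
            rem = 0;
        }
    }
    if rem != 0 {
        debug_assert!(rem < 8);
        pac <<= 8 - rem; // left-align the trailing partial byte
        out.push(pac);
    }
    out
}
```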
3 changes: 3 additions & 0 deletions src/algebra/gf2/share64.rs
@@ -9,6 +9,7 @@ impl LocalOperation for BitSharing64 {}
impl Add for BitSharing64 {
    type Output = Self;

+   #[allow(clippy::suspicious_arithmetic_impl)]
    fn add(self, other: Self) -> Self::Output {
        Self(self.0 ^ other.0)
    }
@@ -17,6 +18,7 @@ impl Add for BitSharing64 {
impl Sub for BitSharing64 {
    type Output = Self;

+   #[allow(clippy::suspicious_arithmetic_impl)]
    fn sub(self, other: Self) -> Self::Output {
        self + other
    }
@@ -25,6 +27,7 @@ impl Sub for BitSharing64 {
impl Mul for BitSharing64 {
    type Output = Self;

+   #[allow(clippy::suspicious_arithmetic_impl)]
    fn mul(self, other: Self) -> Self::Output {
        Self(self.0 & other.0)
    }
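The `clippy::suspicious_arithmetic_impl` allows are needed because these trait impls intentionally use bitwise operators: over GF(2), addition is XOR (so subtraction coincides with addition, every element being its own additive inverse) and multiplication is AND. A quick check of those identities on raw 64-bit sharings (illustrative, not part of the commit):

```rust
fn main() {
    let (a, b) = (0b1011_0001u64, 0b0101_1100u64);
    let sum = a ^ b;             // addition of two bit-sliced sharings
    assert_eq!(sum ^ b, a);      // "subtracting" b is the same XOR
    assert_eq!(a & u64::MAX, a); // multiplication is AND; all-ones is the identity
    assert_eq!(a & 0, 0);        // and zero annihilates
}
```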