Merge pull request #193 from a16z/dedupe/full
Dedupe
moodlezoup authored Mar 21, 2024
2 parents e2f663c + fb1a2d8 commit 232a540
Showing 13 changed files with 524 additions and 515 deletions.
6 changes: 5 additions & 1 deletion common/Cargo.toml
@@ -14,6 +14,10 @@ ark-ec = "0.4.2"
ark-serialize = "0.4.2"
bellpepper-core = "0.4.0"
ff = "0.13.0"
spartan2 = { git = "https://github.com/a16z/Spartan2"}
spartan2 = { git = "https://github.com/a16z/Spartan2" }
ruint = "1.11.1"
halo2curves = "0.6.0"
<<<<<<< HEAD
=======
rayon = { version = "^1.8.0" }
>>>>>>> c2de8c4 (Parallelize ark -> ff conversion)
2 changes: 1 addition & 1 deletion common/src/constants.rs
@@ -4,4 +4,4 @@ pub const REGISTER_START_ADDRESS: usize = 0;
pub const RAM_START_ADDRESS: u64 = 0x80000000;
pub const BYTES_PER_INSTRUCTION: usize = 4;
pub const MEMORY_OPS_PER_INSTRUCTION: usize = 7;
pub const NUM_R1CS_POLYS: usize = 82;
pub const NUM_R1CS_POLYS: usize = 1;
23 changes: 22 additions & 1 deletion common/src/field_conversion.rs
@@ -6,9 +6,10 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ff::PrimeField as GenericPrimeField;
use halo2curves::group::prime::PrimeCurveAffine;
use halo2curves::CurveAffine;

use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use spartan2::provider::bn256_grumpkin::bn256::Affine as Spartan2Affine;
use spartan2::provider::bn256_grumpkin::bn256::Scalar as Spartan2Fr;

pub fn ark_to_spartan<ArkF: ArkPrimeField>(ark: ArkF) -> Spartan2Fr {
let bigint: <ArkF as ArkPrimeField>::BigInt = ark.into_bigint();
let bytes = bigint.to_bytes_le();
@@ -80,6 +81,7 @@ pub fn spartan_to_ark_unsafe<FF: GenericPrimeField<Repr = [u8; 32]>, AF: ArkPrim
}
ark
}

pub trait IntoArk: CurveAffine {
type ArkConfig: SWCurveConfig;

@@ -126,6 +128,25 @@ pub trait IntoSpartan: ark_ec::AffineRepr {
}
}
}

    fn to_spartan_bn256(&self) -> halo2curves::bn256::G1Affine {
        match self.xy() {
            None => halo2curves::bn256::G1Affine::identity(),
            Some((x, y)) => {
                let [x, y] = [x, y].map(|ark_f| {
                    let mut ff_repr = <<halo2curves::bn256::G1Affine as CurveAffine>::Base as GenericPrimeField>::Repr::default();
                    let ff_bytes = ff_repr.as_mut();
                    ark_f.serialize_compressed(ff_bytes).unwrap();
                    ff_repr
                });
                halo2curves::bn256::G1Affine::from_xy(
                    <halo2curves::bn256::G1Affine as CurveAffine>::Base::from_repr(x).unwrap(),
                    <halo2curves::bn256::G1Affine as CurveAffine>::Base::from_repr(y).unwrap(),
                )
                .unwrap()
            }
        }
    }
}

impl IntoArk for Spartan2Affine {
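Note: the rayon import added above lines up with the "Parallelize ark -> ff conversion" commit referenced in the Cargo.toml conflict marker. As a rough sketch of how that dependency might be used for batch conversion (a hypothetical helper, not part of this diff; it assumes ark-bn254, halo2curves 0.6, and rayon, and follows the same bigint -> little-endian bytes -> from_repr path as ark_to_spartan above):

    // Hypothetical helper, not in this commit: batch-convert arkworks BN254
    // scalars into Spartan2/halo2curves scalars in parallel with rayon.
    use ark_ff::{BigInteger, PrimeField};
    use ff::PrimeField as GenericPrimeField;
    use halo2curves::bn256::Fr as SpartanFr;
    use rayon::prelude::*;

    pub fn ark_slice_to_spartan(ark: &[ark_bn254::Fr]) -> Vec<SpartanFr> {
        ark.par_iter()
            .map(|a| {
                // Canonical little-endian bytes of the ark field element.
                let bytes = a.into_bigint().to_bytes_le();
                let mut repr = <SpartanFr as GenericPrimeField>::Repr::default();
                repr.as_mut().copy_from_slice(&bytes);
                // Both sides are the same BN254 scalar field, so the canonical
                // bytes are always a valid representative.
                SpartanFr::from_repr(repr).unwrap()
            })
            .collect()
    }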
4 changes: 3 additions & 1 deletion jolt-core/Cargo.toml
@@ -67,14 +67,16 @@ compiler = { path = "../compiler" }
# witness = { git = "https://github.com/philsippl/circom-witness-rs" }
witness = { git = "https://github.com/sragss/circom-witness-rs", branch = "non-ruint-eval" }
ruint = "1.11.1"
spartan2 = { git = "https://github.com/a16z/Spartan2.git" }
spartan2 = { git = "https://github.com/a16z/Spartan2.git", branch = "dedupe/full"}
# spartan2 = { path = "../Spartan2/" }
ark-bn254 = "0.4.0"
lazy_static = "1.4.0"
halo2curves = "0.6.1"
fixedbitset = "0.5.0"

[build-dependencies]
common = { path = "../common" }
# witness = { git = "https://github.com/sragss/circom-witness-rs", branch = "non-ruint-eval", features = ["build-witness"] }

[lib]
name = "liblasso"
22 changes: 21 additions & 1 deletion jolt-core/src/jolt/vm/bytecode.rs
@@ -1,5 +1,6 @@
use ark_ec::CurveGroup;
use ark_ff::PrimeField;
use halo2curves::group::Curve;
use merlin::Transcript;
use rand::rngs::StdRng;
use rand_core::RngCore;
@@ -311,6 +312,19 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> BytecodePolynomials<F, G> {
}
}

    pub fn get_polys_r1cs(&self) -> (Vec<F>, Vec<F>) {
        let a_read_write_evals = self.a_read_write.evals().clone();
        let v_read_write_evals = [
            self.v_read_write.opcode.evals(),
            self.v_read_write.rs1.evals(),
            self.v_read_write.rs2.evals(),
            self.v_read_write.rd.evals(),
            self.v_read_write.imm.evals(),
        ].concat();

        (a_read_write_evals, v_read_write_evals)
    }

#[tracing::instrument(skip_all, name = "BytecodePolynomials::new")]
pub fn r1cs_polys_from_bytecode(
mut bytecode: Vec<ELFRow>,
@@ -337,7 +351,7 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> BytecodePolynomials<F, G> {
for (trace_index, trace) in trace.iter().take(num_ops).enumerate() {
let address = trace.address * 4 + RAM_START_ADDRESS as usize;
// debug_assert!(address < code_size);
a_read_write_usize[trace_index] = address;
a_read_write_usize[trace_index] = trace.address;
let counter = final_cts[trace.address];
read_cts[trace_index] = counter;
final_cts[trace.address] = counter + 1;
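To spell out the change on the two lines above (an interpretation, not code from the repo): a_read_write previously stored the RAM-style address derived from the bytecode index, and now stores the index itself.

    // Constant copied from common/src/constants.rs (shown earlier in this diff).
    const RAM_START_ADDRESS: u64 = 0x80000000;

    // Old entry: the RAM-style instruction address, 4 bytes per instruction
    // past RAM_START_ADDRESS.
    fn old_entry(bytecode_index: usize) -> usize {
        bytecode_index * 4 + RAM_START_ADDRESS as usize
    }

    // New entry: the raw bytecode index (trace.address) itself.
    fn new_entry(bytecode_index: usize) -> usize {
        bytecode_index
    }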
@@ -440,6 +454,12 @@ pub struct BytecodeCommitment<G: CurveGroup> {
pub init_final_commitments: ConcatenatedPolynomialCommitment<G>,
}

// impl<G: CurveGroup> BytecodeCommitment<G> {
// pub fn get_polys_r1cs(&self) -> Vec<HyraxCommitment<NUM_R1CS_POLYS, G>> {
// self.read_write_commitments
// }
// }

impl<F, G> BatchablePolynomials<G> for BytecodePolynomials<F, G>
where
F: PrimeField,
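For reference, a hypothetical caller sketch (not part of this diff) showing the shape of the data returned by get_polys_r1cs above: one address entry per trace step, plus the five value columns [opcode, rs1, rs2, rd, imm] concatenated back-to-back.

    // Hypothetical, for illustration only: stitch the two flattened vectors
    // into a single witness segment in the order (addresses, values).
    fn bytecode_witness_segment<F>(a_read_write: Vec<F>, v_read_write: Vec<F>) -> Vec<F> {
        debug_assert_eq!(v_read_write.len(), 5 * a_read_write.len());
        let mut segment = Vec::with_capacity(a_read_write.len() + v_read_write.len());
        segment.extend(a_read_write);
        segment.extend(v_read_write);
        segment
    }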
