diff --git a/pkg/addr/isdas.go b/pkg/addr/isdas.go index 6eb1f6909..2cb08c6b8 100644 --- a/pkg/addr/isdas.go +++ b/pkg/addr/isdas.go @@ -241,8 +241,8 @@ func (ia *IA) UnmarshalText(b []byte) error { return nil } -// @ pure // @ decreases +// @ pure func (ia IA) IsZero() bool { return ia == 0 } diff --git a/pkg/experimental/epic/epic.go b/pkg/experimental/epic/epic.go index a183f361d..a316fd5a0 100644 --- a/pkg/experimental/epic/epic.go +++ b/pkg/experimental/epic/epic.go @@ -205,7 +205,8 @@ func CoreFromPktCounter(counter uint32) (uint8, uint32) { // @ requires len(key) == 16 // @ preserves acc(sl.Bytes(key, 0, len(key)), R50) -// @ ensures reserr == nil ==> res != nil && res.Mem() && res.BlockSize() == 16 +// @ ensures reserr == nil ==> +// @ res != nil && res.Mem() && res.BlockSize() == 16 // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases func initEpicMac(key []byte) (res cipher.BlockMode, reserr error) { diff --git a/pkg/experimental/epic/epic_spec.gobra b/pkg/experimental/epic/epic_spec.gobra index 7a8fdc0ed..8b0e7a795 100644 --- a/pkg/experimental/epic/epic_spec.gobra +++ b/pkg/experimental/epic/epic_spec.gobra @@ -19,13 +19,14 @@ package epic import sl "github.com/scionproto/scion/verification/utils/slices" pred postInitInvariant() { - acc(&zeroInitVector, _) && + acc(&zeroInitVector) && len(zeroInitVector[:]) == 16 && - acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) + acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:]))) } // learn the invariant established by init ghost +trusted // TODO: drop after init invs are added ensures acc(postInitInvariant(), _) decreases _ func establishPostInitInvariant() \ No newline at end of file diff --git a/pkg/scrypto/scrypto_spec.gobra b/pkg/scrypto/scrypto_spec.gobra index 9244f498b..b33787ac5 100644 --- a/pkg/scrypto/scrypto_spec.gobra +++ b/pkg/scrypto/scrypto_spec.gobra @@ -27,7 +27,7 @@ import sl "github.com/scionproto/scion/verification/utils/slices" // calls to it will also succeed. This behaviour is abstracted using this // ghost function. ghost -requires acc(sl.Bytes(key, 0, len(key)), _) +requires sl.Bytes(key, 0, len(key)) decreases _ pure func ValidKeyForHash(key []byte) bool diff --git a/pkg/slayers/path/empty/empty_spec.gobra b/pkg/slayers/path/empty/empty_spec.gobra index 044a671fa..60501e4a5 100644 --- a/pkg/slayers/path/empty/empty_spec.gobra +++ b/pkg/slayers/path/empty/empty_spec.gobra @@ -35,16 +35,14 @@ func (e Path) DowngradePerm(buf []byte) { } ghost -pure decreases -func (p Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { +pure func (p Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { return true } ghost -pure decreases -func (p Path) LenSpec(ghost ub []byte) (l int) { +pure func (p Path) LenSpec(ghost ub []byte) (l int) { return PathLen } diff --git a/pkg/slayers/path/epic/epic.go b/pkg/slayers/path/epic/epic.go index b9400a1ca..c9d770163 100644 --- a/pkg/slayers/path/epic/epic.go +++ b/pkg/slayers/path/epic/epic.go @@ -225,7 +225,6 @@ func (p *Path) Len( /*@ ghost ubuf []byte @*/ ) (l int) { // Type returns the EPIC path type identifier. 
// @ pure -// @ requires acc(p.Mem(ubuf), _) // @ ensures t == PathType // @ decreases func (p *Path) Type( /*@ ghost ubuf []byte @*/ ) (t path.Type) { diff --git a/pkg/slayers/path/epic/epic_spec.gobra b/pkg/slayers/path/epic/epic_spec.gobra index 93380d08c..9a59f029c 100644 --- a/pkg/slayers/path/epic/epic_spec.gobra +++ b/pkg/slayers/path/epic/epic_spec.gobra @@ -38,11 +38,10 @@ pred (p *Path) Mem(ubuf []byte) { } ghost -pure -requires acc(p.Mem(ub), _) +requires p.Mem(ub) decreases -func (p *Path) LenSpec(ghost ub []byte) (l int) { - return unfolding acc(p.Mem(ub), _) in +pure func (p *Path) LenSpec(ghost ub []byte) (l int) { + return unfolding p.Mem(ub) in (p.ScionPath == nil ? MetadataLen : MetadataLen + p.ScionPath.LenSpec(ub[MetadataLen:])) @@ -58,51 +57,48 @@ func (p *Path) DowngradePerm(buf []byte) { } ghost -requires acc(r.Mem(ub), _) +requires r.Mem(ub) decreases pure func (r *Path) GetBase(ub []byte) scion.Base { - return unfolding acc(r.Mem(ub), _) in + return unfolding r.Mem(ub) in r.ScionPath.GetBase(ub[MetadataLen:]) } ghost -pure -requires acc(p.Mem(buf), _) -ensures l == (unfolding acc(p.Mem(buf), _) in len(p.PHVF)) +requires p.Mem(buf) decreases -func (p *Path) getPHVFLen(buf []byte) (l int) { - return unfolding acc(p.Mem(buf), _) in len(p.PHVF) +pure func (p *Path) getPHVFLen(buf []byte) (l int) { + return unfolding p.Mem(buf) in + len(p.PHVF) } ghost -pure -requires acc(p.Mem(buf), _) -ensures l == (unfolding acc(p.Mem(buf), _) in len(p.LHVF)) +requires p.Mem(buf) decreases -func (p *Path) getLHVFLen(buf []byte) (l int) { - return unfolding acc(p.Mem(buf), _) in len(p.LHVF) +pure func (p *Path) getLHVFLen(buf []byte) (l int) { + return unfolding p.Mem(buf) in + len(p.LHVF) } ghost -pure -requires acc(p.Mem(buf), _) -ensures r == (unfolding acc(p.Mem(buf), _) in p.ScionPath != nil) +requires p.Mem(buf) decreases -func (p *Path) hasScionPath(buf []byte) (r bool) { - return unfolding acc(p.Mem(buf), _) in p.ScionPath != nil +pure func (p *Path) hasScionPath(buf []byte) (r bool) { + return unfolding p.Mem(buf) in + p.ScionPath != nil } ghost -requires acc(p.Mem(buf), _) +requires p.Mem(buf) decreases pure func (p *Path) GetUnderlyingScionPathBuf(buf []byte) []byte { - return unfolding acc(p.Mem(buf), _) in buf[MetadataLen:] + return unfolding p.Mem(buf) in + buf[MetadataLen:] } ghost -pure decreases -func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { +pure func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { return true } diff --git a/pkg/slayers/path/hopfield.go b/pkg/slayers/path/hopfield.go index 57452ac26..9b08a9d69 100644 --- a/pkg/slayers/path/hopfield.go +++ b/pkg/slayers/path/hopfield.go @@ -96,8 +96,7 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { h.ConsEgress = binary.BigEndian.Uint16(raw[4:6]) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac[:]) ==> //@ &h.Mac[i] == &h.Mac[:][i] - //@ assert forall i int :: { &raw[6:6+MacLen][i] } 0 <= i && i < len(raw[6:6+MacLen]) ==> - //@ &raw[6:6+MacLen][i] == &raw[i+6] + //@ sl.AssertSliceOverlap(raw, 6, 6+MacLen) copy(h.Mac[:], raw[6:6+MacLen] /*@ , R47 @*/) //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == raw[6:6+MacLen][i] //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] @@ -137,9 +136,8 @@ func (h *HopField) SerializeTo(b []byte) (err error) { binary.BigEndian.PutUint16(b[4:6], h.ConsEgress) //@ assert forall i int :: { &b[i] } 0 <= i && i < HopLen ==> acc(&b[i]) //@ 
assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac) ==>
-    //@ &h.Mac[i] == &h.Mac[:][i]
-    //@ assert forall i int :: { &b[6:6+MacLen][i] }{ &b[i+6] } 0 <= i && i < MacLen ==>
-    //@ &b[6:6+MacLen][i] == &b[i+6]
+    //@ &h.Mac[i] == &h.Mac[:][i]
+    //@ sl.AssertSliceOverlap(b, 6, 6+MacLen)
     copy(b[6:6+MacLen], h.Mac[:] /*@, R47 @*/)
     //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == b[6:6+MacLen][i]
     //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i]
diff --git a/pkg/slayers/path/hopfield_spec.gobra b/pkg/slayers/path/hopfield_spec.gobra
index 26e79f8a1..bd11ca444 100644
--- a/pkg/slayers/path/hopfield_spec.gobra
+++ b/pkg/slayers/path/hopfield_spec.gobra
@@ -30,30 +30,30 @@ pred (h *HopField) Mem() {
 ghost
 decreases
-pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs]{
+pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs] {
     return ifs == 0 ? none[io.IO_ifs] : some(io.IO_ifs(ifs))
 }
 
 ghost
 decreases
-pure func IO_ifsToIfs(ifs option[io.IO_ifs]) uint16{
+pure func IO_ifsToIfs(ifs option[io.IO_ifs]) uint16 {
     return ifs == none[io.IO_ifs] ? 0 : uint16(get(ifs))
 }
 
 ghost
 requires 0 <= start && start <= middle
 requires middle + HopLen <= end && end <= len(raw)
-requires acc(sl.Bytes(raw, start, end), _)
+requires sl.Bytes(raw, start, end)
 decreases
 pure func BytesToIO_HF(raw [] byte, start int, middle int, end int) (io.IO_HF) {
-    return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle + 2 + k]) in
-        let _ := Asserting(forall k int :: {&raw[middle+4:middle+6][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+6][k] == &raw[middle + 4 + k]) in
-        let _ := Asserting(forall k int :: {&raw[middle+6:middle+6+MacLen][k]} 0 <= k && k < MacLen ==> &raw[middle+6:middle+6+MacLen][k] == &raw[middle + 6 + k]) in
-        unfolding acc(sl.Bytes(raw, start, end), _) in
-        let inif2 := binary.BigEndian.Uint16(raw[middle+2:middle+4]) in
-        let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in
-        let op_inif2 := ifsToIO_ifs(inif2) in
-        let op_egif2 := ifsToIO_ifs(egif2) in
+    return let _ := sl.AssertSliceOverlap(raw, middle+2, middle+4) in
+        let _ := sl.AssertSliceOverlap(raw, middle+4, middle+6) in
+        let _ := sl.AssertSliceOverlap(raw, middle+6, middle+6+MacLen) in
+        unfolding sl.Bytes(raw, start, end) in
+        let inif2 := binary.BigEndian.Uint16(raw[middle+2:middle+4]) in
+        let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in
+        let op_inif2 := ifsToIO_ifs(inif2) in
+        let op_egif2 := ifsToIO_ifs(egif2) in
         io.IO_HF_ {
             InIF2: op_inif2,
             EgIF2: op_egif2,
@@ -61,6 +61,51 @@ pure func BytesToIO_HF(raw [] byte, start int, middle int, end int) (io.IO_HF) {
     }
 }
 
+// WidenBytesHopField shows that the IO_HF computed from the raw bytes
+// of slice `raw` starting at position `offset` equals the IO_HF
+// obtained from the slice `raw[start:end]`.
+ghost
+requires 0 <= start && start <= offset
+requires offset + HopLen <= end
+requires end <= len(raw)
+preserves acc(sl.Bytes(raw, 0, len(raw)), R55)
+preserves acc(sl.Bytes(raw[start:end], 0, len(raw[start:end])), R55)
+ensures BytesToIO_HF(raw, 0, offset, len(raw)) ==
+    BytesToIO_HF(raw[start:end], 0, offset-start, end-start)
+decreases
+func WidenBytesHopField(raw []byte, offset int, start int, end int) {
+    unfold acc(sl.Bytes(raw, 0, len(raw)), R56)
+    unfold acc(sl.Bytes(raw[start:end], 0, len(raw[start:end])), R56)
+    hfBytes1 := BytesToIO_HF(raw, 0, offset, len(raw))
+    hfBytes2 := BytesToIO_HF(raw[start:end], 0, offset-start, end-start)
+
+    sl.AssertSliceOverlap(raw, start, end)
+    sl.AssertSliceOverlap(raw[start:end], offset-start+2, offset-start+4)
+    assert hfBytes1.InIF2 == hfBytes2.InIF2
+    sl.AssertSliceOverlap(raw[start:end], offset-start+4, offset-start+6)
+    assert hfBytes1.EgIF2 == hfBytes2.EgIF2
+    sl.AssertSliceOverlap(raw[start:end], offset-start+6, offset-start+6+MacLen)
+
+    fold acc(sl.Bytes(raw, 0, len(raw)), R56)
+    fold acc(sl.Bytes(raw[start:end], 0, len(raw[start:end])), R56)
+}
+
+// BytesToAbsHopFieldOffsetEq shows that the IO_HF computed from the raw
+// bytes of slice `raw` starting at position `offset` equals the IO_HF
+// obtained from the slice `raw[offset:offset+HopLen]`.
+// It is a special case of `WidenBytesHopField`.
+ghost
+requires 0 <= offset
+requires offset+HopLen <= len(raw)
+preserves acc(sl.Bytes(raw, 0, len(raw)), R55)
+preserves acc(sl.Bytes(raw[offset:offset+HopLen], 0, HopLen), R55)
+ensures BytesToIO_HF(raw, 0, offset, len(raw)) ==
+    BytesToIO_HF(raw[offset:offset+HopLen], 0, 0, HopLen)
+decreases
+func BytesToAbsHopFieldOffsetEq(raw [] byte, offset int) {
+    WidenBytesHopField(raw, offset, offset, offset+HopLen)
+}
+
 ghost
 decreases
 pure func (h HopField) ToIO_HF() (io.IO_HF) {
diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra
index da554ab37..a992a8701 100644
--- a/pkg/slayers/path/infofield_spec.gobra
+++ b/pkg/slayers/path/infofield_spec.gobra
@@ -32,46 +32,44 @@ pure func InfoFieldOffset(currINF, headerOffset int) int {
 ghost
 requires 0 <= currINF && 0 <= headerOffset
 requires InfoFieldOffset(currINF, headerOffset) < len(raw)
-requires acc(sl.Bytes(raw, 0, len(raw)), R56)
+requires sl.Bytes(raw, 0, len(raw))
 decreases
 pure func ConsDir(raw []byte, currINF int, headerOffset int) bool {
-    return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in
+    return unfolding sl.Bytes(raw, 0, len(raw)) in
        raw[InfoFieldOffset(currINF, headerOffset)] & 0x1 == 0x1
 }
 
 ghost
 requires 0 <= currINF && 0 <= headerOffset
 requires InfoFieldOffset(currINF, headerOffset) < len(raw)
-requires acc(sl.Bytes(raw, 0, len(raw)), R56)
+requires sl.Bytes(raw, 0, len(raw))
 decreases
 pure func Peer(raw []byte, currINF int, headerOffset int) bool {
-    return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in
+    return unfolding sl.Bytes(raw, 0, len(raw)) in
        raw[InfoFieldOffset(currINF, headerOffset)] & 0x2 == 0x2
 }
 
 ghost
 requires 0 <= currINF && 0 <= headerOffset
 requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw)
-requires acc(sl.Bytes(raw, 0, len(raw)), R56)
+requires sl.Bytes(raw, 0, len(raw))
 decreases
 pure func Timestamp(raw []byte, currINF int, headerOffset int) io.IO_ainfo {
-    return let idx := InfoFieldOffset(currINF, headerOffset) + 4 in
-        unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in
-        let _ := Asserting(forall i int :: { &raw[idx+i] } { &raw[idx:idx+4][i] } 0 <= i && i < 4 ==>
-            &raw[idx+i] ==
&raw[idx:idx+4][i]) in - io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) + return let idx := InfoFieldOffset(currINF, headerOffset)+4 in + unfolding sl.Bytes(raw, 0, len(raw)) in + let _ := sl.AssertSliceOverlap(raw, idx, idx+4) in + io.IO_ainfo(binary.BigEndian.Uint32(raw[idx:idx+4])) } ghost requires 0 <= currINF && 0 <= headerOffset requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func AbsUinfo(raw []byte, currINF int, headerOffset int) set[io.IO_msgterm] { - return let idx := InfoFieldOffset(currINF, headerOffset) + 2 in - unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in - let _ := Asserting(forall k int :: {&raw[idx:idx+2][k]} 0 <= k && k < 2 ==> - &raw[idx:idx+4][k] == &raw[idx + k]) in + return let idx := InfoFieldOffset(currINF, headerOffset)+2 in + unfolding sl.Bytes(raw, 0, len(raw)) in + let _ := sl.AssertSliceOverlap(raw, idx, idx+2) in AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[idx:idx+2])) } @@ -79,10 +77,10 @@ ghost opaque requires 0 <= middle requires middle+InfoLen <= len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), _) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func BytesToAbsInfoField(raw [] byte, middle int) (io.AbsInfoField) { - return unfolding acc(sl.Bytes(raw, 0, len(raw)), _) in + return unfolding sl.Bytes(raw, 0, len(raw)) in BytesToAbsInfoFieldHelper(raw, middle) } @@ -90,28 +88,55 @@ ghost requires 0 <= middle requires middle+InfoLen <= len(raw) requires forall i int :: { &raw[i] } middle <= i && i < len(raw) ==> - acc(&raw[i], _) + acc(&raw[i]) decreases pure func BytesToAbsInfoFieldHelper(raw [] byte, middle int) (io.AbsInfoField) { - return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> - &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in - let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> - &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in - io.AbsInfoField(io.AbsInfoField_{ - AInfo : io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])), - UInfo : AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])), - ConsDir : raw[middle] & 0x1 == 0x1, - Peer : raw[middle] & 0x2 == 0x2, - }) + return let _ := sl.AssertSliceOverlap(raw, middle+2, middle+4) in + let _ := sl.AssertSliceOverlap(raw, middle+4, middle+8) in + io.AbsInfoField_{ + AInfo: io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])), + UInfo: AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])), + ConsDir: raw[middle] & 0x1 == 0x1, + Peer: raw[middle] & 0x2 == 0x2, + } +} + +ghost +requires 0 <= middle +requires middle+InfoLen <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R55) +preserves acc(sl.Bytes(raw[middle:middle+InfoLen], 0, InfoLen), R55) +ensures BytesToAbsInfoField(raw, middle) == + BytesToAbsInfoField(raw[middle:middle+InfoLen], 0) +decreases +func BytesToAbsInfoFieldOffsetEq(raw [] byte, middle int) { + start := middle + end := middle+InfoLen + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[start:end], 0, InfoLen), R56) + absInfo1 := reveal BytesToAbsInfoField(raw, start) + absInfo2 := reveal BytesToAbsInfoField(raw[start:end], 0) + + assert absInfo1.ConsDir == absInfo2.ConsDir + assert absInfo1.Peer == absInfo2.Peer + sl.AssertSliceOverlap(raw, start, end) + sl.AssertSliceOverlap(raw[start:end], 4, 8) + assert absInfo1.AInfo == absInfo2.AInfo + sl.AssertSliceOverlap(raw[start:end], 2, 4) + assert 
absInfo1.UInfo == absInfo2.UInfo + assert absInfo1 == absInfo2 + + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[start:end], 0, InfoLen), R56) } ghost decreases pure func (inf InfoField) ToAbsInfoField() (io.AbsInfoField) { - return io.AbsInfoField(io.AbsInfoField_{ - AInfo : io.IO_ainfo(inf.Timestamp), - UInfo : AbsUInfoFromUint16(inf.SegID), - ConsDir : inf.ConsDir, - Peer : inf.Peer, - }) + return io.AbsInfoField_{ + AInfo: io.IO_ainfo(inf.Timestamp), + UInfo: AbsUInfoFromUint16(inf.SegID), + ConsDir: inf.ConsDir, + Peer: inf.Peer, + } } \ No newline at end of file diff --git a/pkg/slayers/path/io_msgterm_spec.gobra b/pkg/slayers/path/io_msgterm_spec.gobra index 601db4004..b356b426e 100644 --- a/pkg/slayers/path/io_msgterm_spec.gobra +++ b/pkg/slayers/path/io_msgterm_spec.gobra @@ -39,7 +39,7 @@ pure func AbsMac(mac [MacLen]byte) (io.IO_msgterm) // involved for accessing exclusive arrays. ghost requires MacLen <= len(mac) -requires forall i int :: { &mac[i] } 0 <= i && i < MacLen ==> acc(&mac[i], _) +requires forall i int :: { &mac[i] } 0 <= i && i < MacLen ==> acc(&mac[i]) ensures len(res) == MacLen ensures forall i int :: { res[i] } 0 <= i && i < MacLen ==> mac[i] == res[i] decreases diff --git a/pkg/slayers/path/onehop/onehop_spec.gobra b/pkg/slayers/path/onehop/onehop_spec.gobra index 84d42dc75..c0a542a4d 100644 --- a/pkg/slayers/path/onehop/onehop_spec.gobra +++ b/pkg/slayers/path/onehop/onehop_spec.gobra @@ -44,24 +44,23 @@ func (p *Path) DowngradePerm(buf []byte) { } ghost -requires acc(o.Mem(ub), _) +requires o.Mem(ub) ensures b decreases pure func (o *Path) InferSizeUb(ghost ub []byte) (b bool) { - return unfolding acc(o.Mem(ub), _) in o.LenSpec(ub) <= len(ub) + return unfolding o.Mem(ub) in + o.LenSpec(ub) <= len(ub) } ghost -pure decreases -func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { +pure func (p *Path) IsValidResultOfDecoding(b []byte, err error) bool { return true } ghost -pure decreases -func (p *Path) LenSpec(ghost ub []byte) (l int) { +pure func (p *Path) LenSpec(ghost ub []byte) int { return PathLen } diff --git a/pkg/slayers/path/path.go b/pkg/slayers/path/path.go index 951af3ab6..ebe40b319 100644 --- a/pkg/slayers/path/path.go +++ b/pkg/slayers/path/path.go @@ -41,7 +41,6 @@ func init() { // (VerifiedSCION) ghost initialization code to establish the PathPackageMem predicate. // @ assert acc(®isteredPaths) // @ assert acc(&strictDecoding) - // @ assert forall t Type :: { registeredPaths[t] } 0 <= t && t < maxPathType ==> !registeredPaths[t].inUse // @ fold PathPackageMem() } @@ -91,7 +90,7 @@ type Path interface { //@ ghost //@ pure //@ requires Mem(b) - //@ requires acc(sl.Bytes(b, 0, len(b)), R42) + //@ requires sl.Bytes(b, 0, len(b)) //@ decreases //@ IsValidResultOfDecoding(b []byte, err error) (res bool) // Reverse reverses a path such that it can be used in the reversed direction. @@ -105,7 +104,7 @@ type Path interface { Reverse( /*@ ghost ub []byte @*/ ) (p Path, e error) //@ ghost //@ pure - //@ requires acc(Mem(ub), _) + //@ requires Mem(ub) //@ ensures 0 <= l //@ decreases //@ LenSpec(ghost ub []byte) (l int) @@ -117,7 +116,7 @@ type Path interface { Len( /*@ ghost ub []byte @*/ ) (l int) // Type returns the type of a path. 
//@ pure - //@ requires acc(Mem(ub), _) + //@ requires Mem(ub) //@ decreases Type( /*@ ghost ub []byte @*/ ) Type //@ ghost @@ -257,8 +256,8 @@ func (p *rawPath) Len( /*@ ghost ub []byte @*/ ) (l int) { } // @ pure -// @ requires acc(p.Mem(ub), _) +// @ requires p.Mem(ub) // @ decreases func (p *rawPath) Type( /*@ ghost ub []byte @*/ ) Type { - return /*@ unfolding acc(p.Mem(ub), _) in @*/ p.pathType + return /*@ unfolding p.Mem(ub) in @*/ p.pathType } diff --git a/pkg/slayers/path/path_spec.gobra b/pkg/slayers/path/path_spec.gobra index 0963b1cd0..7af361e66 100644 --- a/pkg/slayers/path/path_spec.gobra +++ b/pkg/slayers/path/path_spec.gobra @@ -38,19 +38,18 @@ func (p *rawPath) DowngradePerm(ghost buf []byte) { } ghost -pure decreases -func (p *rawPath) IsValidResultOfDecoding(b []byte, err error) (res bool) { +pure func (p *rawPath) IsValidResultOfDecoding(b []byte, err error) (res bool) { return true } ghost -pure -requires acc(p.Mem(ub), _) +requires p.Mem(ub) ensures 0 <= l decreases -func (p *rawPath) LenSpec(ghost ub []byte) (l int) { - return unfolding acc(p.Mem(ub), _) in len(p.raw) +pure func (p *rawPath) LenSpec(ghost ub []byte) (l int) { + return unfolding p.Mem(ub) in + len(p.raw) } (*rawPath) implements Path @@ -68,27 +67,30 @@ pred PathPackageMem() { ghost requires 0 <= t && t < maxPathType -requires acc(PathPackageMem(), _) -ensures res == unfolding acc(PathPackageMem(), _) in registeredPaths[t].inUse +requires PathPackageMem() decreases pure func Registered(t Type) (res bool) { - return unfolding acc(PathPackageMem(), _) in registeredPaths[t].inUse + return unfolding PathPackageMem() in + registeredPaths[t].inUse } ghost requires 0 <= t && t < maxPathType -requires acc(PathPackageMem(), _) +requires PathPackageMem() decreases pure func GetType(t Type) (res Metadata) { - return unfolding acc(PathPackageMem(), _) in registeredPaths[t].Metadata + return unfolding PathPackageMem() in + registeredPaths[t].Metadata } ghost -requires acc(PathPackageMem(), _) -ensures b == unfolding acc(PathPackageMem(), _) in strictDecoding +requires PathPackageMem() +// TODO: drop +// ensures b == unfolding acc(PathPackageMem(), _) in strictDecoding decreases pure func IsStrictDecoding() (b bool) { - return unfolding acc(PathPackageMem(), _) in strictDecoding + return unfolding PathPackageMem() in + strictDecoding } // without passing `writePerm` explicitely below, we run into diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index cbd0e2078..1754bd17a 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -227,11 +227,11 @@ func (s *Base) infIndexForHF(hf uint8) (r uint8) { // store it, based on the metadata. The actual number of bytes available to contain it // can be inferred from the common header field HdrLen. It may or may not be consistent. // @ pure -// @ requires acc(s.Mem(), _) +// @ requires s.Mem() // @ ensures r >= MetaLen // @ decreases func (s *Base) Len() (r int) { - return /*@ unfolding acc(s.Mem(), _) in @*/ MetaLen + s.NumINF*path.InfoLen + s.NumHops*path.HopLen + return /*@ unfolding s.Mem() in @*/ MetaLen + s.NumINF*path.InfoLen + s.NumHops*path.HopLen } // Type returns the type of the path. 
diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra index ea5db53d5..764ff6991 100644 --- a/pkg/slayers/path/scion/base_spec.gobra +++ b/pkg/slayers/path/scion/base_spec.gobra @@ -110,37 +110,41 @@ pure func (b Base) NumsCompatibleWithSegLen() bool { } ghost -requires acc(b.Mem(), _) +requires b.Mem() ensures 0 <= res && res <= 3 decreases pure func (b *Base) GetNumINF() (res int) { - return unfolding acc(b.Mem(), _) in b.NumINF + return unfolding b.Mem() in + b.NumINF } ghost -requires acc(b.Mem(), _) +requires b.Mem() ensures 0 <= res decreases pure func (b *Base) GetNumHops() (res int) { - return unfolding acc(b.Mem(), _) in b.NumHops + return unfolding b.Mem() in + b.NumHops } ghost -requires acc(s.Mem(), _) +requires s.Mem() decreases pure func (s *Base) GetMetaHdr() MetaHdr { - return unfolding acc(s.Mem(), _) in s.PathMeta + return unfolding s.Mem() in + s.PathMeta } ghost -requires acc(s.Mem(), _) +requires s.Mem() decreases pure func (s *Base) GetBase() Base { - return unfolding acc(s.Mem(), _) in *s + return unfolding s.Mem() in + *s } ghost -requires acc(s.Mem(), _) +requires s.Mem() decreases pure func (s *Base) GetCurrHF() uint8 { return s.GetMetaHdr().CurrHF @@ -238,7 +242,7 @@ pure func DecodedFrom(line uint32) MetaHdr { } ghost -requires acc(sl.Bytes(b, 0, len(b)), _) +requires sl.Bytes(b, 0, len(b)) decreases pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { return MetaLen <= len(b) && @@ -255,11 +259,11 @@ pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { } ghost -requires acc(s.Mem(), _) -requires acc(sl.Bytes(b, 0, len(b)), _) +requires s.Mem() +requires sl.Bytes(b, 0, len(b)) decreases pure func (s *Base) DecodeFromBytesSpec(b []byte) bool { - return unfolding acc(s.Mem(), _) in + return unfolding s.Mem() in s.PathMeta.DecodeFromBytesSpec(b) } @@ -290,7 +294,7 @@ pure func (m MetaHdr) SerializedToLine() uint32 { } ghost -requires acc(sl.Bytes(b, 0, len(b)), _) +requires sl.Bytes(b, 0, len(b)) decreases pure func (m MetaHdr) SerializeToSpec(b []byte) bool { return MetaLen <= len(b) && @@ -304,7 +308,7 @@ pure func (m MetaHdr) SerializeToSpec(b []byte) bool { } ghost -requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s Base) EqAbsHeader(ub []byte) bool { // we compute the sublice ub[:MetaLen] inside this function instead @@ -316,11 +320,11 @@ pure func (s Base) EqAbsHeader(ub []byte) bool { } ghost -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s MetaHdr) EqAbsHeader(ub []byte) bool { return MetaLen <= len(ub) && - unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in + unfolding sl.Bytes(ub, 0, len(ub)) in s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])) } diff --git a/pkg/slayers/path/scion/decoded_spec.gobra b/pkg/slayers/path/scion/decoded_spec.gobra index 34e98ca0c..9d4dc3898 100644 --- a/pkg/slayers/path/scion/decoded_spec.gobra +++ b/pkg/slayers/path/scion/decoded_spec.gobra @@ -23,9 +23,8 @@ import ( ) ghost -pure decreases -func (p *Decoded) IsValidResultOfDecoding(b []byte, err error) (res bool) { +pure func (p *Decoded) IsValidResultOfDecoding(b []byte, err error) bool { return true } @@ -66,12 +65,11 @@ func (d *Decoded) Len(ghost ubuf []byte) (l int) { } ghost -pure -requires acc(d.Mem(ub), _) -ensures unfolding acc(d.Mem(ub), _) in l == d.Base.Len() +requires d.Mem(ub) decreases -func (d *Decoded) LenSpec(ghost ub []byte) (l int) { - return unfolding acc(d.Mem(ub), _) in d.Base.Len() +pure func (d 
*Decoded) LenSpec(ghost ub []byte) int { + return unfolding d.Mem(ub) in + d.Base.Len() } /** @@ -80,12 +78,11 @@ func (d *Decoded) LenSpec(ghost ub []byte) (l int) { * Unfortunately, Gobra does not fully support them yet, so we * introduced this method which acts as a wrapper. */ -pure -requires acc(d.Mem(ubuf), _) -ensures unfolding acc(d.Mem(ubuf), _) in t == d.Base.Type() +requires d.Mem(ubuf) decreases -func (d *Decoded) Type(ghost ubuf []byte) (t path.Type) { - return unfolding acc(d.Mem(ubuf), _) in d.Base.Type() +pure func (d *Decoded) Type(ghost ubuf []byte) path.Type { + return unfolding d.Mem(ubuf) in + d.Base.Type() } /** @@ -128,48 +125,53 @@ func (d *Decoded) IncPath(ghost ubuf []byte) (e error) { } ghost -requires acc(d.Mem(ub), _) +requires d.Mem(ub) decreases -pure func (d *Decoded) GetNumINF(ub []byte) (res int) { - return unfolding acc(d.Mem(ub), _) in d.Base.GetNumINF() +pure func (d *Decoded) GetNumINF(ub []byte) int { + return unfolding d.Mem(ub) in + d.Base.GetNumINF() } ghost -requires acc(d.Mem(ub), _) +requires d.Mem(ub) decreases -pure func (d *Decoded) GetNumHops(ub []byte) (res int) { - return unfolding acc(d.Mem(ub), _) in d.Base.GetNumHops() +pure func (d *Decoded) GetNumHops(ub []byte) int { + return unfolding d.Mem(ub) in + d.Base.GetNumHops() } ghost -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases pure func (s *Decoded) GetMetaHdr(ub []byte) MetaHdr { - return unfolding acc(s.Mem(ub), _) in s.Base.GetMetaHdr() + return unfolding s.Mem(ub) in + s.Base.GetMetaHdr() } /**** End of Stubs ****/ /**** Auxiliary Functions ****/ -requires acc(d.Mem(ubuf), _) +requires d.Mem(ubuf) decreases pure func (d *Decoded) getLenInfoFields(ubuf []byte) int { - return unfolding acc(d.Mem(ubuf), _) in len(d.InfoFields) + return unfolding d.Mem(ubuf) in + len(d.InfoFields) } -requires acc(d.Mem(ubuf), _) +requires d.Mem(ubuf) decreases pure func (d *Decoded) getLenHopFields(ubuf []byte) int { - return unfolding acc(d.Mem(ubuf), _) in len(d.HopFields) + return unfolding d.Mem(ubuf) in + len(d.HopFields) } ghost -requires acc(d.Mem(ubuf), _) +requires d.Mem(ubuf) decreases pure func (d *Decoded) GetBase(ubuf []byte) Base { - return unfolding acc(d.Mem(ubuf), _) in - (unfolding acc(d.Base.Mem(), _) in d.Base) + return unfolding d.Mem(ubuf) in + (unfolding d.Base.Mem() in d.Base) } /**** End of Auxiliary Functions ****/ diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra index f8c436c68..2c6f7cea2 100644 --- a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -221,7 +221,7 @@ opaque requires 0 < SegLen requires 0 <= currHfIdx && currHfIdx <= SegLen requires SegLen * path.HopLen == len(hopfields) -requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) +requires sl.Bytes(hopfields, 0, len(hopfields)) decreases pure func CurrSegWithInfo(hopfields []byte, currHfIdx int, SegLen int, inf io.AbsInfoField) io.IO_seg3 { return segment(hopfields, 0, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, inf.Peer, SegLen) @@ -241,7 +241,7 @@ requires (currInfIdx == 1 && segs.Seg2Len > 0) || let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && len(hopfields) == end - start && - acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) + sl.Bytes(hopfields, 0, len(hopfields)) decreases pure func LeftSegWithInfo( hopfields []byte, @@ -268,7 +268,7 @@ requires (currInfIdx == 0 && segs.Seg2Len > 0) || let end := HopfieldsEndIdx(currInfIdx, segs) in 
inf != none[io.AbsInfoField] && len(hopfields) == end - start && - acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) + sl.Bytes(hopfields, 0, len(hopfields)) decreases pure func RightSegWithInfo( hopfields []byte, @@ -295,7 +295,7 @@ requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && len(hopfields) == end - start && - acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) + sl.Bytes(hopfields, 0, len(hopfields)) decreases pure func MidSegWithInfo( hopfields []byte, @@ -320,7 +320,7 @@ requires 0 <= currInfIdx && currInfIdx < 3 preserves acc(sl.Bytes(raw, 0, len(raw)), R50) preserves acc(sl.Bytes(raw[offset:offset + SegLen * path.HopLen], 0, SegLen * path.HopLen), R50) preserves acc(sl.Bytes(InfofieldByteSlice(raw, currInfIdx), 0, path.InfoLen), R50) -ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in +ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) == CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) decreases @@ -330,8 +330,9 @@ func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegL infOffset := path.InfoFieldOffset(currInfIdx, MetaLen) unfold acc(sl.Bytes(raw, 0, len(raw)), R56) unfold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) - assert reveal path.BytesToAbsInfoField(raw, infOffset) == - reveal path.BytesToAbsInfoField(infoBytes, 0) + path.BytesToAbsInfoFieldOffsetEq(raw, infOffset) + assert path.BytesToAbsInfoField(raw, infOffset) == + path.BytesToAbsInfoField(infoBytes, 0) reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) reveal CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) fold acc(sl.Bytes(raw, 0, len(raw)), R56) @@ -363,13 +364,13 @@ ghost requires segs.Valid() requires PktLen(segs, MetaLen) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 -requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires sl.Bytes(raw, 0, len(raw)) requires (currInfIdx == 1 && segs.Seg2Len > 0) || (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + sl.Bytes(infoBytes, 0, path.InfoLen) && + sl.Bytes(hopBytes, 0, len(hopBytes)) decreases pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { return (currInfIdx == 1 && segs.Seg2Len > 0) || @@ -424,13 +425,13 @@ ghost requires segs.Valid() requires PktLen(segs, MetaLen) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 -requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires sl.Bytes(raw, 0, len(raw)) requires (currInfIdx == 0 && segs.Seg2Len > 0) || (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + sl.Bytes(infoBytes, 0, path.InfoLen) && + sl.Bytes(hopBytes, 0, len(hopBytes)) decreases pure func RightSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { return (currInfIdx == 0 && segs.Seg2Len > 0) || @@ -485,13 +486,13 @@ ghost requires segs.Valid() requires PktLen(segs, MetaLen) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 
-requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires sl.Bytes(raw, 0, len(raw)) requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && (currInfIdx == 2 || currInfIdx == 4)) ==> let infoBytes := InfofieldByteSlice(raw, currInfIdx) in let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in - acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && - acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) + sl.Bytes(infoBytes, 0, path.InfoLen) && + sl.Bytes(hopBytes, 0, len(hopBytes)) decreases pure func MidSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { return (segs.Seg2Len > 0 && segs.Seg3Len > 0 && @@ -551,24 +552,24 @@ func MidSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { ghost requires 0 <= currHfIdx && currHfIdx < segLen requires segLen * path.HopLen == len(hopfields) -requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R50) +requires sl.Bytes(hopfields, 0, len(hopfields)) requires let currHfStart := currHfIdx * path.HopLen in - let currHfEnd := currHfStart + path.HopLen in - acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) && - acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R50) && - acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50) + let currHfEnd := currHfStart + path.HopLen in + sl.Bytes(hopfields[:currHfStart], 0, currHfStart) && + sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen) && + sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen) decreases pure func BytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) bool { return let currseg := CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) in - let currHfStart := currHfIdx * path.HopLen in - let currHfEnd := currHfStart + path.HopLen in + let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in len(currseg.Future) > 0 && currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) && - currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1)) && - currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && - currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && - currseg.AInfo == inf.AInfo && - currseg.UInfo == inf.UInfo && + currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1)) && + currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.AInfo == inf.AInfo && + currseg.UInfo == inf.UInfo && currseg.ConsDir == inf.ConsDir && currseg.Peer == inf.Peer } diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 259258cd9..2772de12a 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -351,14 +351,7 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info return path.InfoField{}, err } //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) - //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) - //@ unfold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) - //@ assert reveal path.BytesToAbsInfoField(ubuf, infOffset) == - //@ reveal path.BytesToAbsInfoField(ubuf[infOffset : infOffset+path.InfoLen], 0) - //@ assert info.ToAbsInfoField() == - //@ reveal path.BytesToAbsInfoField(ubuf, infOffset) - //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) - //@ fold 
acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) + //@ path.BytesToAbsInfoFieldOffsetEq(ubuf, infOffset) //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) //@ fold acc(s.Mem(ubuf), R11) //@ assert reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, info) @@ -493,12 +486,7 @@ func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField } //@ unfold hop.Mem() //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) - //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) - //@ unfold acc(sl.Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) - //@ assert hop.ToIO_HF() == - //@ path.BytesToIO_HF(ubuf, 0, hopOffset, len(ubuf)) - //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) - //@ fold acc(sl.Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ path.BytesToAbsHopFieldOffsetEq(ubuf, hopOffset) //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) //@ fold acc(s.Mem(ubuf), R11) //@ assert reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, hop) @@ -628,10 +616,10 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) // IsFirstHop returns whether the current hop is the first hop on the path. // @ pure -// @ requires acc(s.Mem(ubuf), _) +// @ requires s.Mem(ubuf) // @ decreases func (s *Raw) IsFirstHop( /*@ ghost ubuf []byte @*/ ) bool { - return /*@ unfolding acc(s.Mem(ubuf), _) in (unfolding acc(s.Base.Mem(), _) in @*/ s.PathMeta.CurrHF == 0 /*@ ) @*/ + return /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.PathMeta.CurrHF == 0 /*@ ) @*/ } // IsPenultimateHop returns whether the current hop is the penultimate hop on the path. diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index a5d40da62..856ebd813 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -42,11 +42,10 @@ pred (s *Raw) Mem(buf []byte) { (*Raw) implements path.Path ghost -pure -requires acc(s.Mem(buf), _) -requires acc(sl.Bytes(buf, 0, len(buf)), R42) +requires s.Mem(buf) +requires sl.Bytes(buf, 0, len(buf)) decreases -func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) (res bool) { +pure func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) bool { return let base := s.GetBase(buf) in base.EqAbsHeader(buf) && base.WeaklyValid() @@ -59,12 +58,11 @@ func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) (res bool) { * Unfortunately, Gobra does not fully support them yet, so we * introduced this wrapper method which acts as a wrapper. 
*/ -pure -requires acc(s.Mem(buf), _) -ensures unfolding acc(s.Mem(buf), _) in t == s.Base.Type() +requires s.Mem(buf) decreases -func (s *Raw) Type(ghost buf []byte) (t path.Type) { - return unfolding acc(s.Mem(buf), _) in s.Base.Type() +pure func (s *Raw) Type(ghost buf []byte) path.Type { + return unfolding s.Mem(buf) in + s.Base.Type() } /** @@ -81,12 +79,11 @@ func (s *Raw) Len(ghost buf []byte) (l int) { } ghost -pure -requires acc(s.Mem(ub), _) -ensures unfolding acc(s.Mem(ub), _) in l == s.Base.Len() +requires s.Mem(ub) decreases -func (s *Raw) LenSpec(ghost ub []byte) (l int) { - return unfolding acc(s.Mem(ub), _) in s.Base.Len() +pure func (s *Raw) LenSpec(ghost ub []byte) int { + return unfolding s.Mem(ub) in + s.Base.Len() } /** @@ -150,55 +147,59 @@ func (r *Raw) Widen(ubuf1, ubuf2 []byte) { /**** Start of helpful pure functions ****/ ghost -requires acc(r.Mem(ub), _) +requires r.Mem(ub) decreases pure func (r *Raw) GetBase(ub []byte) Base { - return unfolding acc(r.Mem(ub), _) in r.Base.GetBase() + return unfolding r.Mem(ub) in + r.Base.GetBase() } ghost -requires acc(r.Mem(ub), _) +requires r.Mem(ub) decreases pure func (r *Raw) GetNumINF(ghost ub []byte) int { - return unfolding acc(r.Mem(ub), _) in (unfolding acc(r.Base.Mem(), _) in r.NumINF) + return unfolding r.Mem(ub) in + (unfolding r.Base.Mem() in r.NumINF) } ghost -requires acc(r.Mem(ub), _) +requires r.Mem(ub) decreases pure func (r *Raw) GetNumHops(ghost ub []byte) int { - return unfolding acc(r.Mem(ub), _) in (unfolding acc(r.Base.Mem(), _) in r.NumHops) + return unfolding r.Mem(ub) in + (unfolding r.Base.Mem() in r.NumHops) } ghost -requires acc(r.Mem(ub), _) +requires r.Mem(ub) decreases pure func (r *Raw) GetCurrINF(ghost ub []byte) uint8 { - return unfolding acc(r.Mem(ub), _) in (unfolding acc(r.Base.Mem(), _) in r.PathMeta.CurrINF) + return unfolding r.Mem(ub) in + (unfolding r.Base.Mem() in r.PathMeta.CurrINF) } ghost -requires acc(r.Mem(ub), _) +requires r.Mem(ub) decreases pure func (r *Raw) GetCurrHF(ghost ub []byte) uint8 { - return unfolding acc(r.Mem(ub), _) in - (unfolding acc(r.Base.Mem(), _) in r.PathMeta.CurrHF) + return unfolding r.Mem(ub) in + (unfolding r.Base.Mem() in r.PathMeta.CurrHF) } ghost -pure -requires acc(s.Mem(buf), _) +requires s.Mem(buf) decreases -func (s *Raw) RawBufferMem(ghost buf []byte) []byte { - return unfolding acc(s.Mem(buf), _) in s.Raw +pure func (s *Raw) RawBufferMem(ghost buf []byte) []byte { + return unfolding s.Mem(buf) in + s.Raw } ghost -pure -requires acc(s.NonInitMem(), _) +requires s.NonInitMem() decreases -func (s *Raw) RawBufferNonInitMem() []byte { - return unfolding acc(s.NonInitMem(), _) in s.Raw +pure func (s *Raw) RawBufferNonInitMem() []byte { + return unfolding s.NonInitMem() in + s.Raw } /**** End of helpful pure functions ****/ @@ -219,7 +220,7 @@ ghost requires 0 <= offset requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) ensures len(res) == segLen - currHfIdx decreases segLen - currHfIdx pure func hopFields( @@ -251,14 +252,14 @@ pure func segHistory(hopfields seq[io.IO_HF]) (res seq[io.IO_ahi]) { } ghost +requires sl.Bytes(raw, 0, len(raw)) requires 0 <= offset requires 0 < segLen requires 0 <= currHfIdx && currHfIdx <= segLen requires offset + path.HopLen * segLen <= len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), R56) -ensures len(res.Future) == segLen - currHfIdx -ensures len(res.History) == currHfIdx -ensures 
len(res.Past) == currHfIdx +ensures len(res.Future) == segLen - currHfIdx +ensures len(res.History) == currHfIdx +ensures len(res.Past) == currHfIdx decreases pure func segment(raw []byte, offset int, @@ -282,13 +283,13 @@ pure func segment(raw []byte, ghost opaque +requires sl.Bytes(raw, 0, len(raw)) requires 0 <= headerOffset requires path.InfoFieldOffset(currInfIdx, headerOffset) + path.InfoLen <= offset requires 0 < segLen requires offset + path.HopLen * segLen <= len(raw) requires 0 <= currHfIdx && currHfIdx <= segLen requires 0 <= currInfIdx && currInfIdx < 3 -requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func CurrSeg(raw []byte, offset int, @@ -305,11 +306,11 @@ pure func CurrSeg(raw []byte, ghost opaque +requires sl.Bytes(raw, 0, len(raw)) requires 0 <= headerOffset requires segs.Valid() requires PktLen(segs, headerOffset) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 4 -requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func LeftSeg( raw []byte, @@ -326,11 +327,11 @@ pure func LeftSeg( ghost opaque +requires sl.Bytes(raw, 0, len(raw)) requires 0 <= headerOffset requires segs.Valid() requires PktLen(segs, headerOffset) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 2 -requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RightSeg( raw []byte, @@ -347,11 +348,11 @@ pure func RightSeg( ghost opaque +requires sl.Bytes(raw, 0, len(raw)) requires 0 <= headerOffset requires segs.Valid() requires PktLen(segs, headerOffset) <= len(raw) requires 2 <= currInfIdx && currInfIdx < 5 -requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func MidSeg( raw []byte, @@ -368,9 +369,10 @@ pure func MidSeg( ghost opaque -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) requires validPktMetaHdr(raw) decreases +// TODO: rename this to View() pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { return let _ := reveal validPktMetaHdr(raw) in let metaHdr := RawBytesToMetaHdr(raw) in @@ -385,26 +387,26 @@ pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { let numINF := segs.NumInfoFields() in let offset := HopFieldOffset(numINF, prevSegLen, MetaLen) in io.IO_Packet2 { - CurrSeg : CurrSeg(raw, offset, currInfIdx, currHfIdx - prevSegLen, segLen, MetaLen), - LeftSeg : LeftSeg(raw, currInfIdx + 1, segs, MetaLen), - MidSeg : MidSeg(raw, currInfIdx + 2, segs, MetaLen), - RightSeg : RightSeg(raw, currInfIdx - 1, segs, MetaLen), + CurrSeg: CurrSeg(raw, offset, currInfIdx, currHfIdx - prevSegLen, segLen, MetaLen), + LeftSeg: LeftSeg(raw, currInfIdx + 1, segs, MetaLen), + MidSeg: MidSeg(raw, currInfIdx + 2, segs, MetaLen), + RightSeg: RightSeg(raw, currInfIdx - 1, segs, MetaLen), } } ghost requires MetaLen <= len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func RawBytesToMetaHdr(raw []byte) MetaHdr { - return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + return unfolding sl.Bytes(raw, 0, len(raw)) in let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in DecodedFrom(hdr) } ghost requires MetaLen <= len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func RawBytesToBase(raw []byte) Base { return let metaHdr := RawBytesToMetaHdr(raw) in @@ -417,7 +419,7 @@ pure func RawBytesToBase(raw []byte) Base { ghost opaque -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func validPktMetaHdr(raw []byte) bool { return MetaLen <= len(raw) && @@ -518,23 +520,23 @@ 
pure func absIncPathSeg(currseg io.IO_seg3) io.IO_seg3 { } ghost -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases pure func (s *Raw) IsLastHopSpec(ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in + return unfolding s.Mem(ub) in + unfolding s.Base.Mem() in int(s.PathMeta.CurrHF) == (s.NumHops - 1) } ghost opaque -requires acc(s.Mem(ub), _) +requires s.Mem(ub) requires 0 <= idx && idx < s.GetNumINF(ub) -requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.InfoField) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in + return unfolding s.Mem(ub) in + unfolding s.Base.Mem() in let infOffset := MetaLen + idx*path.InfoLen in infOffset + path.InfoLen <= len(ub) && info.ToAbsInfoField() == @@ -543,13 +545,13 @@ pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.Info ghost opaque -requires acc(s.Mem(ub), _) +requires s.Mem(ub) requires s.GetBase(ub).ValidCurrInfSpec() -requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in + return unfolding s.Mem(ub) in + unfolding s.Base.Mem() in let infOffset := MetaLen + int(s.Base.PathMeta.CurrINF) * path.InfoLen in infOffset + path.InfoLen <= len(ub) && info.ToAbsInfoField() == @@ -558,13 +560,13 @@ pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { ghost opaque -requires acc(s.Mem(ub), _) +requires s.Mem(ub) requires 0 <= idx && idx < s.GetNumHops(ub) -requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopField) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in + return unfolding s.Mem(ub) in + unfolding s.Base.Mem() in let hopOffset := MetaLen + int(s.NumINF) * path.InfoLen + idx * path.HopLen in hopOffset + path.HopLen <= len(ub) && hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) @@ -572,13 +574,13 @@ pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopFie ghost opaque -requires acc(s.Mem(ub), _) +requires s.Mem(ub) requires s.GetBase(ub).ValidCurrHfSpec() -requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { - return unfolding acc(s.Mem(ub), _) in - unfolding acc(s.Base.Mem(), _) in + return unfolding s.Mem(ub) in + unfolding s.Base.Mem() in let hopOffset := MetaLen + int(s.NumINF) * path.InfoLen + int(s.Base.PathMeta.CurrHF) * path.HopLen in hopOffset + path.HopLen <= len(ub) && @@ -654,8 +656,8 @@ opaque decreases pure func (s *Raw) EqAbsInfoField(pkt io.IO_pkt2, info io.AbsInfoField) bool { return let currseg := pkt.CurrSeg in - info.AInfo == currseg.AInfo && - info.UInfo == currseg.UInfo && + info.AInfo == currseg.AInfo && + info.UInfo == currseg.UInfo && info.ConsDir == currseg.ConsDir && info.Peer == currseg.Peer } @@ -674,7 +676,7 @@ ensures s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) ensures s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) decreases func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) { - reveal validPktMetaHdr(ubuf) + assert reveal validPktMetaHdr(ubuf) metaHdr := 
RawBytesToMetaHdr(ubuf) currInfIdx := int(metaHdr.CurrINF) currHfIdx := int(metaHdr.CurrHF) @@ -687,13 +689,18 @@ func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) hfIdxSeg := currHfIdx - prevSegLen - reveal s.CorrectlyDecodedInf(ubuf, info) - reveal s.CorrectlyDecodedHf(ubuf, hop) - reveal s.absPkt(ubuf) + assert reveal s.CorrectlyDecodedInf(ubuf, info) + assert reveal s.CorrectlyDecodedHf(ubuf, hop) + reveal CurrSeg(ubuf, offset, currInfIdx, hfIdxSeg, segLen, MetaLen) HopsFromPrefixOfRawMatchPrefixOfHops(ubuf, offset, 0, segLen, hfIdxSeg) - assert reveal s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) - assert reveal s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) + + pktView := reveal s.absPkt(ubuf) + infoView := info.ToAbsInfoField() + + assert pktView.CurrSeg.Peer == infoView.Peer + assert reveal s.EqAbsInfoField(pktView, infoView) + assert reveal s.EqAbsHopField(pktView, hop.ToIO_HF()) } ghost diff --git a/pkg/slayers/path/scion/widen-lemma.gobra b/pkg/slayers/path/scion/widen-lemma.gobra index 0297715a6..69294dc0f 100644 --- a/pkg/slayers/path/scion/widen-lemma.gobra +++ b/pkg/slayers/path/scion/widen-lemma.gobra @@ -50,10 +50,15 @@ func WidenCurrSeg(raw []byte, ainfo1 := path.Timestamp(raw, currInfIdx, headerOffset) ainfo2 := path.Timestamp(raw[start:length], currInfIdx, headerOffset-start) + sl.AssertSliceOverlap(raw, start, length) + idxTimestamp := path.InfoFieldOffset(currInfIdx, headerOffset-start)+4 + sl.AssertSliceOverlap(raw[start:length], idxTimestamp, idxTimestamp+4) assert ainfo1 == ainfo2 uinfo1 := path.AbsUinfo(raw, currInfIdx, headerOffset) uinfo2 := path.AbsUinfo(raw[start:length], currInfIdx, headerOffset-start) + idxUinfo := path.InfoFieldOffset(currInfIdx, headerOffset-start)+2 + sl.AssertSliceOverlap(raw[start:length], idxUinfo, idxUinfo+2) assert uinfo1 == uinfo2 consDir1 := path.ConsDir(raw, currInfIdx, headerOffset) @@ -96,25 +101,6 @@ func widenSegment(raw []byte, widenHopFields(raw, offset, 0, segLen, start, length, newP) } -ghost -requires 0 <= start && start <= middle -requires middle + path.HopLen <= length -requires length <= len(raw) -preserves acc(sl.Bytes(raw, 0, len(raw)), R54) -preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R54) -ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) == - path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) -decreases -func widenBytesToIO_HF(raw []byte, middle int, start int, length int) { - unfold acc(sl.Bytes(raw, 0, len(raw)), R55) - unfold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R55) - hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw)) - hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) - assert hfBytes1 == hfBytes2 - fold acc(sl.Bytes(raw, 0, len(raw)), R55) - fold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R55) -} - ghost requires R53 < p requires 0 <= start && start <= offset @@ -128,7 +114,7 @@ ensures hopFields(raw, offset, currHfIdx, segLen) == decreases segLen - currHfIdx func widenHopFields(raw []byte, offset int, currHfIdx int, segLen int, start int, length int, p perm) { if (currHfIdx != segLen) { - widenBytesToIO_HF(raw, offset + path.HopLen * currHfIdx, start, length) + path.WidenBytesHopField(raw, offset + path.HopLen * currHfIdx, start, length) hf1 := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHfIdx, len(raw)) hf2 := path.BytesToIO_HF(raw[start:length], 0, offset 
+ path.HopLen * currHfIdx - start, length - start) newP := (p + R53)/2 diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index e3de3e8f3..d3a87768f 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -223,12 +223,8 @@ func (s *SCION) NetworkFlow() (res gopacket.Flow) { // @ ensures acc(s.Mem(ubuf), R0) // @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures sl.Bytes(b.UBuf(), 0, len(b.UBuf())) -// TODO: hide internal spec details // @ ensures e == nil && s.HasOneHopPath(ubuf) ==> -// @ len(b.UBuf()) == old(len(b.UBuf())) + unfolding acc(s.Mem(ubuf), R55) in -// @ (CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) -// @ ensures e == nil && s.HasOneHopPath(ubuf) ==> -// @ (unfolding acc(s.Mem(ubuf), R55) in CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) <= len(ubuf) +// @ len(b.UBuf()) == old(len(b.UBuf())) + s.MinSizeOfUbufWithOneHop(ubuf) // @ ensures e != nil ==> e.ErrorMem() // post for IO: // @ ensures e == nil && old(s.EqPathType(ubuf)) ==> @@ -795,17 +791,19 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ } if ip := a.IP.To4( /*@ wildcard @*/ ); ip != nil { // @ ghost if !wildcard && isIPv6(a) { - // @ assert isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &a.IP[12+i] + // @ assert isConvertibleToIPv4(hostAddr) ==> + // @ forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &a.IP[12+i] // @ } - // @ assert !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) + // @ assert !wildcard && isIP(hostAddr) ==> + // @ (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) // @ ghost if wildcard { - // @ fold acc(sl.Bytes(ip, 0, len(ip)), _) + // @ fold acc(sl.Bytes(ip, 0, len(ip)), _) // @ } else { - // @ fold acc(sl.Bytes(ip, 0, len(ip)), R20) - // @ package acc(sl.Bytes(ip, 0, len(ip)), R20) --* acc(hostAddr.Mem(), R20) { - // @ unfold acc(sl.Bytes(ip, 0, len(ip)), R20) - // @ fold acc(hostAddr.Mem(), R20) - // @ } + // @ fold acc(sl.Bytes(ip, 0, len(ip)), R20) + // @ package acc(sl.Bytes(ip, 0, len(ip)), R20) --* acc(hostAddr.Mem(), R20) { + // @ unfold acc(sl.Bytes(ip, 0, len(ip)), R20) + // @ fold acc(hostAddr.Mem(), R20) + // @ } // @ } return T4Ip, ip, nil } diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index a66bce011..5aef3f771 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -96,27 +96,29 @@ pred PathPoolMemExceptOne(pathPool []path.Path, pathPoolRaw path.Path, pathType } ghost -requires acc(&s.pathPool, _) +requires acc(&s.pathPool) decreases pure func (s *SCION) pathPoolInitialized() bool { return s.pathPool != nil } ghost -requires acc(s.NonInitMem(), _) +requires s.NonInitMem() decreases pure func (s *SCION) PathPoolInitializedNonInitMem() bool { - return unfolding acc(s.NonInitMem(), _) in s.pathPool != nil + return unfolding s.NonInitMem() in + s.pathPool != nil } ghost requires 0 <= pathType && pathType < path.MaxPathType -requires acc(&s.pathPool, _) && acc(&s.pathPoolRaw, _) +requires acc(&s.pathPool) && acc(&s.pathPoolRaw) requires s.pathPool != nil -requires 
acc(PathPoolMemExceptOne(s.pathPool, s.pathPoolRaw, pathType), _) +requires PathPoolMemExceptOne(s.pathPool, s.pathPoolRaw, pathType) decreases pure func (s *SCION) getPathPure(pathType path.Type) path.Path { - return int(pathType) < len(s.pathPool)? (unfolding acc(PathPoolMemExceptOne(s.pathPool, s.pathPoolRaw, pathType), _) in s.pathPool[pathType]) : + return int(pathType) < len(s.pathPool)? + (unfolding PathPoolMemExceptOne(s.pathPool, s.pathPoolRaw, pathType) in s.pathPool[pathType]) : s.pathPoolRaw } @@ -180,14 +182,15 @@ pred (s *SCION) Mem(ubuf []byte) { // end of path pool // helpful facts for other methods: // - for router::updateScionLayer: - (typeOf(s.Path) == type[*onehop.Path] ==> CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) <= len(ubuf)) + (typeOf(s.Path) == type[*onehop.Path] ==> + s.ValidSizeOhpUbOpenInv(ubuf)) } ghost -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases pure func (s *SCION) ValidPathMetaData(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in + return unfolding s.Mem(ub) in let ubPath := s.UBPath(ub) in (typeOf(s.Path) == type[*scion.Raw] ==> s.Path.(*scion.Raw).GetBase(ubPath).Valid()) && @@ -269,55 +272,51 @@ func (s *SCION) ExtractAcc(ghost ub []byte) (start, end int) { } ghost -pure decreases -func (a AddrType) Has3Bits() (res bool) { +pure func (a AddrType) Has3Bits() (res bool) { return 0 <= a && a <= 7 } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) UBPath(ub []byte) []byte { - return unfolding acc(s.Mem(ub), _) in +pure func (s *SCION) UBPath(ub []byte) []byte { + return unfolding s.Mem(ub) in ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) UBScionPath(ub []byte) []byte { - return unfolding acc(s.Mem(ub), _) in +pure func (s *SCION) UBScionPath(ub []byte) []byte { + return unfolding s.Mem(ub) in let ubPath := ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] in typeOf(s.Path) == *epic.Path ? - unfolding acc(s.Path.Mem(ubPath), _) in ubPath[epic.MetadataLen:] : + unfolding s.Path.Mem(ubPath) in ubPath[epic.MetadataLen:] : ubPath } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) PathStartIdx(ub []byte) int { - return unfolding acc(s.Mem(ub), _) in CmnHdrLen+s.AddrHdrLenSpecInternal() +pure func (s *SCION) PathStartIdx(ub []byte) int { + return unfolding s.Mem(ub) in + CmnHdrLen+s.AddrHdrLenSpecInternal() } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) PathEndIdx(ub []byte) int { - return unfolding acc(s.Mem(ub), _) in int(s.HdrLen*LineLen) +pure func (s *SCION) PathEndIdx(ub []byte) int { + return unfolding s.Mem(ub) in + int(s.HdrLen*LineLen) } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) PathScionStartIdx(ub []byte) int { - return unfolding acc(s.Mem(ub), _) in +pure func (s *SCION) PathScionStartIdx(ub []byte) int { + return unfolding s.Mem(ub) in let offset := CmnHdrLen+s.AddrHdrLenSpecInternal() in typeOf(s.Path) == *epic.Path ? 
offset + epic.MetadataLen : @@ -325,11 +324,11 @@ func (s *SCION) PathScionStartIdx(ub []byte) int { } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) PathScionEndIdx(ub []byte) int { - return unfolding acc(s.Mem(ub), _) in int(s.HdrLen*LineLen) +pure func (s *SCION) PathScionEndIdx(ub []byte) int { + return unfolding s.Mem(ub) in + int(s.HdrLen*LineLen) } ghost @@ -345,21 +344,20 @@ func LemmaPathIdxStartEnd(s *SCION, ub []byte, p perm) { } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) GetPath(ub []byte) path.Path { - return unfolding acc(s.Mem(ub), _) in s.Path +pure func (s *SCION) GetPath(ub []byte) path.Path { + return unfolding s.Mem(ub) in + s.Path } ghost opaque -pure -requires acc(s.Mem(ub), _) -requires acc(sl.Bytes(ub, 0, length), _) +requires s.Mem(ub) +requires sl.Bytes(ub, 0, length) requires CmnHdrLen <= length decreases -func (s *SCION) ValidHeaderOffset(ub []byte, length int) bool { +pure func (s *SCION) ValidHeaderOffset(ub []byte, length int) bool { return GetAddressOffsetWithinLength(ub, length) == s.PathStartIdx(ub) && GetLengthWithinLength(ub,length) == s.PathEndIdx(ub) } @@ -406,21 +404,20 @@ func (s *SCION) ValidHeaderOffsetFromSubSliceLemma(ub []byte, length int) { ghost opaque -pure -requires acc(s.Mem(ub), _) -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires s.Mem(ub) +requires sl.Bytes(ub, 0, len(ub)) decreases -func (s *SCION) EqAbsHeader(ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in +pure func (s *SCION) EqAbsHeader(ub []byte) bool { + return unfolding s.Mem(ub) in let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in let high := s.HdrLen*LineLen in - GetAddressOffset(ub) == low && - GetLength(ub) == int(high) && + GetAddressOffset(ub) == low && + GetLength(ub) == int(high) && // Might be worth introducing EqAbsHeader as an interface method on Path // to avoid doing these casts, especially when we add support for EPIC. typeOf(s.Path) == (*scion.Raw) && - unfolding acc(s.Path.Mem(ub[low:high]), _) in - unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in + unfolding s.Path.Mem(ub[low:high]) in + unfolding sl.Bytes(ub, 0, len(ub)) in let _ := Asserting(forall k int :: {&ub[low:high][k]} 0 <= k && k < high ==> &ub[low:high][k] == &ub[low + k]) in let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> @@ -431,17 +428,16 @@ func (s *SCION) EqAbsHeader(ub []byte) bool { let seg3 := int(metaHdr.SegLen[2]) in let segs := io.CombineSegLens(seg1, seg2, seg3) in s.Path.(*scion.Raw).Base.GetBase() == - scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} + scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} } // Describes a SCION packet that was successfully decoded by `DecodeFromBytes`. ghost opaque -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) ValidScionInitSpec(ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in +pure func (s *SCION) ValidScionInitSpec(ub []byte) bool { + return unfolding s.Mem(ub) in let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in let high := s.HdrLen*LineLen in typeOf(s.Path) == (*scion.Raw) && @@ -451,7 +447,7 @@ func (s *SCION) ValidScionInitSpec(ub []byte) bool { // Checks if the common path header is valid in the serialized scion packet. 
ghost opaque -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func ValidPktMetaHdr(raw []byte) bool { return CmnHdrLen <= len(raw) && @@ -461,7 +457,7 @@ pure func ValidPktMetaHdr(raw []byte) bool { let rawHdr := raw[start:end] in let length := GetLength(raw) in length <= len(raw) && - unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + unfolding sl.Bytes(raw, 0, len(raw)) in let _ := Asserting(forall k int :: {&rawHdr[k]} 0 <= k && k < scion.MetaLen ==> &rawHdr[k] == &raw[start + k]) in let hdr := binary.BigEndian.Uint32(rawHdr) in let metaHdr := scion.DecodedFrom(hdr) in @@ -477,7 +473,7 @@ pure func ValidPktMetaHdr(raw []byte) bool { ghost opaque -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) decreases pure func IsSupportedPkt(raw []byte) bool { return CmnHdrLen <= len(raw) && @@ -529,7 +525,7 @@ func (s *SCION) IsSupportedPktLemma(ub []byte, buffer []byte) { } ghost -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires sl.Bytes(ub, 0, len(ub)) requires CmnHdrLen <= len(ub) decreases pure func GetAddressOffset(ub []byte) int { @@ -537,18 +533,18 @@ pure func GetAddressOffset(ub []byte) int { } ghost -requires acc(sl.Bytes(ub, 0, length), _) +requires sl.Bytes(ub, 0, length) requires CmnHdrLen <= length decreases pure func GetAddressOffsetWithinLength(ub []byte, length int) int { - return unfolding acc(sl.Bytes(ub, 0, length), _) in + return unfolding sl.Bytes(ub, 0, length) in let dstAddrLen := AddrType(ub[9] >> 4 & 0x7).Length() in - let srcAddrLen := AddrType(ub[9] & 0x7).Length() in + let srcAddrLen := AddrType(ub[9] & 0x7).Length() in CmnHdrLen + 2*addr.IABytes + dstAddrLen + srcAddrLen } ghost -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires sl.Bytes(ub, 0, len(ub)) requires CmnHdrLen <= len(ub) decreases pure func GetLength(ub []byte) int { @@ -556,33 +552,36 @@ pure func GetLength(ub []byte) int { } ghost -requires acc(sl.Bytes(ub, 0, length), _) +requires sl.Bytes(ub, 0, length) requires CmnHdrLen <= length decreases pure func GetLengthWithinLength(ub []byte, length int) int { - return unfolding acc(sl.Bytes(ub, 0, length), _) in int(ub[5])*LineLen + return unfolding sl.Bytes(ub, 0, length) in + int(ub[5]) * LineLen } ghost -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires sl.Bytes(ub, 0, len(ub)) requires CmnHdrLen <= len(ub) decreases pure func GetPathType(ub []byte) int { - return unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in int(ub[8]) + return unfolding sl.Bytes(ub, 0, len(ub)) in + int(ub[8]) } ghost -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires sl.Bytes(ub, 0, len(ub)) requires CmnHdrLen <= len(ub) decreases pure func GetNextHdr(ub []byte) int { - return unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in int(ub[4]) + return unfolding sl.Bytes(ub, 0, len(ub)) in + int(ub[4]) } ghost opaque -requires acc(s.Mem(ub), _) -requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires s.Mem(ub) +requires sl.Bytes(ub, 0, len(ub)) decreases pure func (s *SCION) EqPathType(ub []byte) bool { return reveal s.EqPathTypeWithBuffer(ub, ub) @@ -590,61 +589,60 @@ pure func (s *SCION) EqPathType(ub []byte) bool { ghost opaque -requires acc(s.Mem(ub), _) -requires acc(sl.Bytes(buffer, 0, len(buffer)), _) +requires s.Mem(ub) +requires sl.Bytes(buffer, 0, len(buffer)) decreases pure func (s *SCION) EqPathTypeWithBuffer(ub []byte, buffer []byte) bool { - return unfolding acc(s.Mem(ub), _) in + return unfolding s.Mem(ub) in CmnHdrLen <= len(buffer) && path.Type(GetPathType(buffer)) == s.PathType && 
L4ProtocolType(GetNextHdr(buffer)) == s.NextHdr } ghost -pure -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases -func (s *SCION) GetScionPath(ub []byte) path.Path { - return unfolding acc(s.Mem(ub), _) in ( +pure func (s *SCION) GetScionPath(ub []byte) path.Path { + return unfolding s.Mem(ub) in ( typeOf(s.Path) == *epic.Path ? (let ubPath := ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] in - unfolding acc(s.Path.Mem(ubPath), _) in + unfolding s.Path.Mem(ubPath) in (path.Path)(s.Path.(*epic.Path).ScionPath)) : s.Path) } ghost -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases pure func (s *SCION) GetPayloadLen(ghost ub []byte) uint16 { - return unfolding acc(s.Mem(ub), _) in s.PayloadLen + return unfolding s.Mem(ub) in + s.PayloadLen } ghost -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases pure func (s *SCION) GetPayload(ghost ub []byte) []byte { - return unfolding acc(s.Mem(ub), _) in s.Payload + return unfolding s.Mem(ub) in + s.Payload } ghost -requires acc(&s.DstAddrType, _) && acc(&s.SrcAddrType, _) +requires acc(&s.DstAddrType) && acc(&s.SrcAddrType) requires s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() ensures 0 <= res decreases -pure -func (s *SCION) AddrHdrLenSpecInternal() (res int) { +pure func (s *SCION) AddrHdrLenSpecInternal() (res int) { return 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() } ghost -requires acc(s.Mem(ubuf), R55) +requires s.Mem(ubuf) ensures 0 <= res decreases -pure -func (s *SCION) AddrHdrLenSpec(ubuf []byte) (res int) { - return unfolding acc(s.Mem(ubuf), R55) in - unfolding acc(s.HeaderMem(ubuf[CmnHdrLen:]), R55) in +pure func (s *SCION) AddrHdrLenSpec(ubuf []byte) (res int) { + return unfolding s.Mem(ubuf) in + unfolding s.HeaderMem(ubuf[CmnHdrLen:]) in 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() } @@ -712,7 +710,8 @@ func (s *SCION) PathPoolMemExchange(pathType path.Type, p path.Path) { ghost requires typeOf(a) == *net.IPAddr requires acc(&a.(*net.IPAddr).IP, _) -requires forall i int :: { &a.(*net.IPAddr).IP[i] } 0 <= i && i < len(a.(*net.IPAddr).IP) ==> acc(&a.(*net.IPAddr).IP[i], _) +requires forall i int :: { &a.(*net.IPAddr).IP[i] } 0 <= i && i < len(a.(*net.IPAddr).IP) ==> + acc(&a.(*net.IPAddr).IP[i]) requires len(a.(*net.IPAddr).IP) == net.IPv6len decreases pure func isConvertibleToIPv4(a net.Addr) bool { @@ -723,7 +722,7 @@ pure func isConvertibleToIPv4(a net.Addr) bool { ghost requires typeOf(a) == *net.IPAddr -requires acc(&a.(*net.IPAddr).IP, _) +requires acc(&a.(*net.IPAddr).IP) decreases pure func isIPv6(a net.Addr) bool { return len(a.(*net.IPAddr).IP) == net.IPv6len @@ -731,7 +730,7 @@ pure func isIPv6(a net.Addr) bool { ghost requires typeOf(a) == *net.IPAddr -requires acc(&a.(*net.IPAddr).IP, _) +requires acc(&a.(*net.IPAddr).IP) decreases pure func isIPv4(a net.Addr) bool { return len(a.(*net.IPAddr).IP) == net.IPv4len @@ -751,25 +750,65 @@ pure func isHostSVC(a net.Addr) bool { // invariant established by initialization ghost +trusted // TODO: drop this line after the static init PR ensures acc(path.PathPackageMem(), _) decreases func EstablishPathPkgMem() ghost -requires acc(s.Mem(ub), _) +requires s.Mem(ub) decreases pure func (s *SCION) HasOneHopPath(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in typeOf(s.Path) == type[*onehop.Path] + return unfolding s.Mem(ub) in + typeOf(s.Path) == type[*onehop.Path] } ghost -requires acc(s.Mem(ub), _) +requires acc(&s.DstAddrType) && + acc(&s.SrcAddrType) && + acc(&s.HdrLen) && + acc(&s.Path) 
+requires s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() +requires 0 <= CmnHdrLen+s.AddrHdrLenSpecInternal() && + CmnHdrLen+s.AddrHdrLenSpecInternal() <= s.HdrLen*LineLen && + s.HdrLen*LineLen <= len(ub) +requires s.Path != nil && s.Path.Mem(ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) +decreases +pure func (s *SCION) MinSizeOfUbufWithOneHopOpenInv(ub []byte) int { + return let pathSlice := ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] in + let pathLen := s.Path.LenSpec(pathSlice) in + CmnHdrLen + s.AddrHdrLenSpecInternal() + pathLen +} + +ghost +requires s.Mem(ub) +decreases +pure func (s *SCION) MinSizeOfUbufWithOneHop(ub []byte) int { + return unfolding s.Mem(ub) in + s.MinSizeOfUbufWithOneHopOpenInv(ub) +} + +ghost +requires acc(&s.DstAddrType) && + acc(&s.SrcAddrType) && + acc(&s.HdrLen) && + acc(&s.Path) +requires s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() +requires 0 <= CmnHdrLen+s.AddrHdrLenSpecInternal() && + CmnHdrLen+s.AddrHdrLenSpecInternal() <= s.HdrLen*LineLen && + s.HdrLen*LineLen <= len(ub) +requires s.Path != nil && s.Path.Mem(ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) +decreases +pure func (s *SCION) ValidSizeOhpUbOpenInv(ub []byte) (b bool) { + return s.MinSizeOfUbufWithOneHopOpenInv(ub) <= len(ub) +} + +ghost +requires s.Mem(ub) requires s.HasOneHopPath(ub) ensures b decreases -pure func (s *SCION) InferSizeOHP(ghost ub []byte) (b bool) { - return unfolding acc(s.Mem(ub), _) in - let pathSlice := ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] in - let pathLen := s.Path.LenSpec(pathSlice) in - CmnHdrLen + s.AddrHdrLenSpecInternal() + pathLen <= len(ub) +pure func (s *SCION) ValidSizeOhpUb(ub []byte) (b bool) { + return unfolding s.Mem(ub) in + s.ValidSizeOhpUbOpenInv(ub) } \ No newline at end of file diff --git a/pkg/slayers/scmp_msg.go b/pkg/slayers/scmp_msg.go index 6df7fdee6..86e9fd9f7 100644 --- a/pkg/slayers/scmp_msg.go +++ b/pkg/slayers/scmp_msg.go @@ -381,7 +381,8 @@ func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm) // @ unfold sl.Bytes(data, 2, 4) - // @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i] + // @ assert &data[offset : offset+2][0] == &data[offset] + // @ assert &data[offset : offset+2][1] == &data[offset+1] i.SeqNumber = binary.BigEndian.Uint16(data[offset : offset+2]) // @ fold sl.Bytes(data, 2, 4) // @ ) @@ -398,13 +399,15 @@ func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res // @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm) // @ unfold sl.Bytes(data, 0, 4) // @ unfold sl.Bytes(data, 4, len(data)) - // @ assert forall i int :: { &data[offset:][i] } 0 <= i && i < len(data) - offset ==> &data[offset:][i] == &data[offset + i] + // @ sl.AssertSliceOverlap(data, offset, len(data)) i.BaseLayer = BaseLayer{ Contents: data[:offset], Payload: data[offset:], } - // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[offset+l] == &i.Payload[l] - // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> acc(&i.Payload[l]) + // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> + // @ &data[offset+l] == &i.Payload[l] + // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> + // @ acc(&i.Payload[l]) // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) // @ fold 
sl.Bytes(i.Payload, 0, len(i.Payload)) // @ fold i.BaseLayer.Mem(data, 4) @@ -439,7 +442,8 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) // @ unfold sl.Bytes(underlyingBufRes, 2, 4) - // @ assert forall i int :: { &buf[offset:offset+2][i] } 0 <= i && i < 2 ==> &buf[offset:offset+2][i] == &buf[offset + i] + // @ assert &buf[offset : offset+2][0] == &buf[offset] + // @ assert &buf[offset : offset+2][1] == &buf[offset+1] binary.BigEndian.PutUint16(buf[offset:offset+2], i.SeqNumber) // @ fold sl.Bytes(underlyingBufRes, 2, 4) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) @@ -513,7 +517,8 @@ func (i *SCMPParameterProblem) DecodeFromBytes(data []byte, df gopacket.DecodeFe // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm) // @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm) // @ unfold sl.Bytes(data, 2, 4) - // @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2:4][i] == &data[2 + i] + // @ assert &data[2:4][0] == &data[2] + // @ assert &data[2:4][1] == &data[3] i.Pointer = binary.BigEndian.Uint16(data[2:4]) // @ fold sl.Bytes(data, 2, 4) // @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm) @@ -526,12 +531,13 @@ func (i *SCMPParameterProblem) DecodeFromBytes(data []byte, df gopacket.DecodeFe // @ decreases // @ outline ( // @ unfold sl.Bytes(data, 0, len(data)) - // @ assert forall i int :: { &data[4:][i] } 0 <= i && i < len(data) ==> &data[4:][i] == &data[4 + i] + // @ sl.AssertSliceOverlap(data, 4, len(data)) i.BaseLayer = BaseLayer{ Contents: data[:4], Payload: data[4:], } - // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[4+l] == &i.Payload[l] + // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> + // @ &data[4+l] == &i.Payload[l] // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) // @ fold sl.Bytes(i.Payload, 0, len(i.Payload)) // @ fold i.BaseLayer.Mem(data, 4) @@ -565,7 +571,8 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) // @ unfold sl.Bytes(underlyingBufRes, 2, 4) - // @ assert forall i int :: { &buf[2:4][i] } 0 <= i && i < 2 ==> &buf[2:4][i] == &buf[2 + i] + // @ assert &buf[2:4][0] == &buf[2] + // @ assert &buf[2:4][1] == &buf[3] binary.BigEndian.PutUint16(buf[2:4], i.Pointer) // @ fold sl.Bytes(underlyingBufRes, 2, 4) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) @@ -670,7 +677,8 @@ func (i *SCMPTraceroute) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2, len(data), 2+2, R40) // @ unfold acc(sl.Bytes(data, 2, 2+2), R40) - // @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i] + // @ assert &data[offset : offset+2][0] == &data[offset] + // @ assert &data[offset : offset+2][1] == &data[offset+1] i.Sequence = binary.BigEndian.Uint16(data[offset : offset+2]) // @ fold acc(sl.Bytes(data, 2, 2+2), R40) // @ ) @@ -684,8 +692,8 @@ func (i *SCMPTraceroute) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback // @ decreases // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2+2, 
len(data), 2+2+addr.IABytes, R40) - // @ unfold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40) - // @ assert forall i int :: { &data[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i] + // @ unfold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40) + // @ sl.AssertSliceOverlap(data, offset, offset+addr.IABytes) i.IA = addr.IA(binary.BigEndian.Uint64(data[offset : offset+addr.IABytes])) // @ fold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40) // @ ) @@ -700,7 +708,7 @@ func (i *SCMPTraceroute) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback // @ outline ( // @ sl.SplitByIndex_Bytes(data, 2+2+addr.IABytes, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, R40) // @ unfold acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) - // @ assert forall i int :: { &data[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i] + // @ sl.AssertSliceOverlap(data, offset, offset+scmpRawInterfaceLen) i.Interface = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen]) // @ fold acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40) // @ ) @@ -745,7 +753,8 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 2+2, writePerm) // @ unfold sl.Bytes(underlyingBufRes, 2, 2+2) - // @ assert forall i int :: { &buf[offset:offset+2][i] } 0 <= i && i < 2 ==> &buf[offset:offset+2][i] == &buf[offset + i] + // @ assert &buf[offset : offset+2][0] == &buf[offset] + // @ assert &buf[offset : offset+2][1] == &buf[offset+1] binary.BigEndian.PutUint16(buf[offset:offset+2], i.Sequence) // @ fold sl.Bytes(underlyingBufRes, 2, 2+2) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 2+2, writePerm) @@ -754,7 +763,7 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2+2, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) // @ unfold sl.Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes) - // @ assert forall i int :: { &buf[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &buf[offset:offset+addr.IABytes][i] == &buf[offset + i] + // @ sl.AssertSliceOverlap(buf, offset, offset+addr.IABytes) binary.BigEndian.PutUint64(buf[offset:offset+addr.IABytes], uint64(i.IA)) // @ fold sl.Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2+2, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) @@ -763,7 +772,7 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2+addr.IABytes, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2+2+addr.IABytes, len(underlyingBufRes), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm) // @ unfold sl.Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) - // @ assert forall i int :: { &buf[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &buf[offset:offset+scmpRawInterfaceLen][i] == &buf[offset + i] + // @ sl.AssertSliceOverlap(buf, offset, offset+scmpRawInterfaceLen) 
binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Interface) // @ fold sl.Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2+2+addr.IABytes, len(underlyingBufRes), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm) @@ -835,12 +844,13 @@ func (i *SCMPDestinationUnreachable) DecodeFromBytes(data []byte, // @ defer fold i.Mem(data) // @ defer fold i.BaseLayer.Mem(data, minLength) // @ unfold sl.Bytes(data, 0, len(data)) - // @ assert forall i int :: { &data[minLength:][i] } 0 <= i && i < len(data) - minLength ==> &data[minLength:][i] == &data[minLength + i] + // @ sl.AssertSliceOverlap(data, minLength, len(data)) i.BaseLayer = BaseLayer{ Contents: data[:minLength], Payload: data[minLength:], } - // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[minLength:][l] == &i.Payload[l] + // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> + // @ &data[minLength:][l] == &i.Payload[l] // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) // @ fold sl.Bytes(i.Payload, 0, len(i.Payload)) return nil @@ -938,7 +948,8 @@ func (i *SCMPPacketTooBig) DecodeFromBytes(data []byte, df gopacket.DecodeFeedba // @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm) // @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm) // @ unfold sl.Bytes(data, 2, 4) - // @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2:4][i] == &data[2 + i] + // @ assert &data[2:4][0] == &data[2] + // @ assert &data[2:4][1] == &data[3] i.MTU = binary.BigEndian.Uint16(data[2:4]) // @ fold sl.Bytes(data, 2, 4) // @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm) @@ -951,12 +962,13 @@ func (i *SCMPPacketTooBig) DecodeFromBytes(data []byte, df gopacket.DecodeFeedba // @ decreases // @ outline ( // @ unfold sl.Bytes(data, 0, len(data)) - // @ assert forall i int :: { &data[4:][i] } 0 <= i && i < len(data) ==> &data[4:][i] == &data[4 + i] + // @ sl.AssertSliceOverlap(data, 4, len(data)) i.BaseLayer = BaseLayer{ Contents: data[:4], Payload: data[4:], } - // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[4+l] == &i.Payload[l] + // @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> + // @ &data[4+l] == &i.Payload[l] // @ fold sl.Bytes(i.Contents, 0, len(i.Contents)) // @ fold sl.Bytes(i.Payload, 0, len(i.Payload)) // @ fold i.BaseLayer.Mem(data, 4) @@ -990,7 +1002,8 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket // @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm) // @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) // @ unfold sl.Bytes(underlyingBufRes, 2, 4) - // @ assert forall i int :: { &buf[2:4][i] } 0 <= i && i < 2 ==> &buf[2:4][i] == &buf[2 + i] + // @ assert &buf[2:4][0] == &buf[2] + // @ assert &buf[2:4][1] == &buf[3] binary.BigEndian.PutUint16(buf[2:4], i.MTU) // @ fold sl.Bytes(underlyingBufRes, 2, 4) // @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm) diff --git a/router/assumptions.gobra b/router/assumptions.gobra index 6a635c96b..7634e35d6 100644 --- a/router/assumptions.gobra +++ b/router/assumptions.gobra @@ -96,10 +96,10 @@ func establishInvalidDstIA() ghost trusted -requires acc(err.ErrorMem(), _) +requires err.ErrorMem() decreases err.ErrorMem() pure func (err scmpError) IsDuplicableMem() bool { - return unfolding acc(err.ErrorMem(), _) in + return unfolding 
err.ErrorMem() in err.Cause.IsDuplicableMem() } diff --git a/router/dataplane.go b/router/dataplane.go index 1ade5d5d7..34bfc7c51 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -2795,8 +2795,6 @@ func (p *scionPacketProcessor) currentInfoPointer( /*@ ghost ubScionL []byte @*/ scion.MetaLen + path.InfoLen*int(p.path.PathMeta.CurrINF)) } -// (VerifiedSCION) This could probably be made pure, but it is likely not beneficial, nor needed -// to expose the body of this function at the moment. // @ requires acc(p.scionLayer.Mem(ubScionL), R20) // @ requires acc(&p.path, R50) // @ requires p.path == p.scionLayer.GetPath(ubScionL) @@ -4326,7 +4324,7 @@ func updateSCIONLayer(rawPkt []byte, s *slayers.SCION, buffer gopacket.Serialize // https://fsnets.slack.com/archives/C8ADBBG0J/p1592805884250700 rawContents := buffer.Bytes() // @ assert !(reveal slayers.IsSupportedPkt(rawContents)) - // @ s.InferSizeOHP(rawPkt) + // @ s.ValidSizeOhpUb(rawPkt) // @ assert len(rawContents) <= len(rawPkt) // @ unfold sl.Bytes(rawPkt, 0, len(rawPkt)) // @ unfold acc(sl.Bytes(rawContents, 0, len(rawContents)), R20) diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index fd0d667ec..e1f340217 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -200,72 +200,74 @@ func (d *DataPlane) getForwardingMetrics() { } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getValForwardingMetrics() map[uint16]forwardingMetrics { - return unfolding acc(d.Mem(), _) in d.forwardingMetrics + return unfolding d.Mem() in + d.forwardingMetrics } ghost -pure -requires acc(p.sInit(), _) +requires p.sInit() decreases -func (p *scionPacketProcessor) getIngressID() uint16 { - return unfolding acc(p.sInit(), _) in p.ingressID +pure func (p *scionPacketProcessor) getIngressID() uint16 { + return unfolding p.sInit() in + p.ingressID } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getMacFactory() func() hash.Hash { - return unfolding acc(d.Mem(), _) in d.macFactory + return unfolding d.Mem() in + d.macFactory } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getDomForwardingMetrics() set[uint16] { - return unfolding acc(d.Mem(), _) in + return unfolding d.Mem() in d.forwardingMetrics == nil ? set[uint16]{} : - (unfolding acc(accForwardingMetrics(d.forwardingMetrics), _) in + (unfolding accForwardingMetrics(d.forwardingMetrics) in domain(d.forwardingMetrics)) } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) GetDomInternalNextHops() set[uint16] { - return unfolding acc(d.Mem(), _) in + return unfolding d.Mem() in d.internalNextHops == nil ? set[uint16]{} : - (unfolding acc(accAddr(d.internalNextHops), _) in + (unfolding accAddr(d.internalNextHops) in domain(d.internalNextHops)) } ghost opaque -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getDomExternal() set[uint16] { - return unfolding acc(d.Mem(), _) in + return unfolding d.Mem() in d.external == nil ? set[uint16]{} : - (unfolding acc(accBatchConn(d.external), _) in + (unfolding accBatchConn(d.external) in domain(d.external)) } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getDomNeighborIAs() set[uint16] { - return unfolding acc(d.Mem(), _) in + return unfolding d.Mem() in d.neighborIAs == nil ? 
set[uint16]{} : domain(d.neighborIAs) } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getDomLinkTypes() set[uint16] { return unfolding acc(d.Mem(), _) in @@ -275,7 +277,7 @@ pure func (d *DataPlane) getDomLinkTypes() set[uint16] { ghost opaque -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) WellConfigured() bool { return d.getDomNeighborIAs() == d.getDomExternal() && @@ -286,7 +288,7 @@ pure func (d *DataPlane) WellConfigured() bool { ghost opaque -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) PreWellConfigured() bool { return d.getDomNeighborIAs() == d.getDomExternal() && @@ -378,17 +380,19 @@ func (d *DataPlane) getRunningMem() { } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) getValSvc() *services { - return unfolding acc(d.Mem(), _) in d.svc + return unfolding d.Mem() in + d.svc } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) SvcsAreSet() bool { - return unfolding acc(d.Mem(), _) in d.svc != nil + return unfolding d.Mem() in + d.svc != nil } ghost @@ -431,76 +435,76 @@ func (d *DataPlane) getNewPacketProcessorFootprint() { type Unit struct{} ghost -pure -requires acc(d.Mem(), _) +requires d.Mem() decreases -func (d *DataPlane) IsRunning() bool { - return unfolding acc(d.Mem(), _) in d.running +pure func (d *DataPlane) IsRunning() bool { + return unfolding d.Mem() in + d.running } ghost opaque -pure -requires acc(d.Mem(), _) -requires acc(&d.running, _) +requires d.Mem() +requires acc(&d.running) ensures d.IsRunning() == d.running decreases -func (d *DataPlane) isRunningEq() Unit { - return unfolding acc(d.Mem(), _) in Unit{} +pure func (d *DataPlane) isRunningEq() Unit { + return unfolding d.Mem() in + Unit{} } ghost -pure -requires acc(d.Mem(), _) +requires d.Mem() decreases -func (d *DataPlane) InternalConnIsSet() bool { - return unfolding acc(d.Mem(), _) in d.internal != nil +pure func (d *DataPlane) InternalConnIsSet() bool { + return unfolding d.Mem() in + d.internal != nil } ghost -pure -requires acc(d.Mem(), _) +requires d.Mem() decreases -func (d *DataPlane) MetricsAreSet() bool { - return unfolding acc(d.Mem(), _) in d.Metrics != nil +pure func (d *DataPlane) MetricsAreSet() bool { + return unfolding d.Mem() in + d.Metrics != nil } ghost opaque -pure -requires acc(d.Mem(), _) -requires acc(&d.internal, _) +requires d.Mem() +requires acc(&d.internal) ensures d.InternalConnIsSet() == (d.internal != nil) decreases -func (d *DataPlane) internalIsSetEq() Unit { - return unfolding acc(d.Mem(), _) in Unit{} +pure func (d *DataPlane) internalIsSetEq() Unit { + return unfolding d.Mem() in + Unit{} } ghost -pure -requires acc(d.Mem(), _) +requires d.Mem() decreases -func (d *DataPlane) KeyIsSet() bool { - return unfolding acc(d.Mem(), _) in d.macFactory != nil +pure func (d *DataPlane) KeyIsSet() bool { + return unfolding d.Mem() in + d.macFactory != nil } ghost opaque -pure -requires acc(d.Mem(), _) -requires acc(&d.macFactory, _) +requires d.Mem() +requires acc(&d.macFactory) ensures d.KeyIsSet() == (d.macFactory != nil) decreases -func (d *DataPlane) keyIsSetEq() Unit { - return unfolding acc(d.Mem(), _) in Unit{} +pure func (d *DataPlane) keyIsSetEq() Unit { + return unfolding d.Mem() in + Unit{} } ghost -pure -requires acc(d.Mem(), _) +requires d.Mem() decreases -func (d *DataPlane) LocalIA() addr.IA { - return unfolding acc(d.Mem(), _) in d.localIA +pure func (d *DataPlane) LocalIA() addr.IA { + return 
unfolding d.Mem() in + d.localIA } /**** End of acessor methods to avoid unfolding the Mem predicate of the dataplane so much ****/ @@ -547,17 +551,16 @@ func (s *scmpError) Set(e error) { } ghost -pure -requires acc(s.Mem(), _) +requires s.Mem() decreases -func (s *scmpError) Get() error { - return unfolding acc(s.Mem(), _) in *s +pure func (s *scmpError) Get() error { + return unfolding s.Mem() in + *s } ghost -pure decreases -func (s *scmpError) CanSet(e error) bool { +pure func (s *scmpError) CanSet(e error) bool { return typeOf(e) == type[scmpError] } @@ -591,52 +594,59 @@ pred (s* scionPacketProcessor) sInit() { // each ghost method on *scionPacketProcessor has, in the name, the state in which it // expects to find the packet processor. In the case below, the state `Init` is expected. ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitD() (res *DataPlane) { - return unfolding acc(s.sInit(), _) in s.d + return unfolding s.sInit() in + s.d } ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitRawPkt() (res []byte) { - return unfolding acc(s.sInit(), _) in s.rawPkt + return unfolding s.sInit() in + s.rawPkt } ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitPath() (res *scion.Raw) { - return unfolding acc(s.sInit(), _) in s.path + return unfolding s.sInit() in + s.path } ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitHopField() (res path.HopField) { - return unfolding acc(s.sInit(), _) in s.hopField + return unfolding s.sInit() in + s.hopField } ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitInfoField() (res path.InfoField) { - return unfolding acc(s.sInit(), _) in s.infoField + return unfolding s.sInit() in + s.infoField } ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitSegmentChange() (res bool) { - return unfolding acc(s.sInit(), _) in s.segmentChange + return unfolding s.sInit() in + s.segmentChange } ghost -requires acc(s.sInit(), _) +requires s.sInit() decreases pure func (s* scionPacketProcessor) sInitBufferUBuf() (res []byte) { - return unfolding acc(s.sInit(), _) in s.buffer.UBuf() + return unfolding s.sInit() in + s.buffer.UBuf() } /** end spec for newPacketProcessor **/ @@ -709,21 +719,21 @@ func (d *DataPlane) InDomainExternalInForwardingMetrics3(id uint16) { } ghost -requires acc(&d.forwardingMetrics, _) -requires acc(accForwardingMetrics(d.forwardingMetrics), _) +requires acc(&d.forwardingMetrics) +requires accForwardingMetrics(d.forwardingMetrics) decreases pure func (d *DataPlane) domainForwardingMetrics() set[uint16] { - return unfolding acc(accForwardingMetrics(d.forwardingMetrics), _) in + return unfolding accForwardingMetrics(d.forwardingMetrics) in domain(d.forwardingMetrics) } ghost -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) DomainForwardingMetrics() set[uint16] { - return unfolding acc(d.Mem(), _) in + return unfolding d.Mem() in d.forwardingMetrics != nil ? 
- unfolding acc(accForwardingMetrics(d.forwardingMetrics), _) in + unfolding accForwardingMetrics(d.forwardingMetrics) in domain(d.forwardingMetrics) : set[uint16]{} } diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index ba26a992a..15c0d4475 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -102,28 +102,27 @@ func AbsValidateIngressIDXoverLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingres ghost opaque -requires acc(p.scionLayer.Mem(ub), R50) -requires acc(&p.d, R55) && acc(p.d.Mem(), _) -requires acc(&p.ingressID, R55) +requires p.scionLayer.Mem(ub) +requires acc(&p.d) && p.d.Mem() +requires acc(&p.ingressID) decreases pure func (p *scionPacketProcessor) DstIsLocalIngressID(ub []byte) bool { - return (unfolding acc(p.scionLayer.Mem(ub), R50) in - (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in - p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 + return (unfolding p.scionLayer.Mem(ub) in + (unfolding p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) in p.scionLayer.DstIA) == (unfolding p.d.Mem() in p.d.localIA)) ==> + p.ingressID != 0 } ghost opaque -requires acc(p.scionLayer.Mem(ub), R50) -requires acc(&p.d, R55) && acc(p.d.Mem(), _) -requires acc(&p.ingressID, R55) -requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires p.scionLayer.Mem(ub) +requires acc(&p.d) && p.d.Mem() +requires acc(&p.ingressID) +requires sl.Bytes(ub, 0, len(ub)) requires slayers.ValidPktMetaHdr(ub) decreases pure func (p *scionPacketProcessor) LastHopLen(ub []byte) bool { - return (unfolding acc(p.scionLayer.Mem(ub), R50) in - (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in - p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> + return (unfolding p.scionLayer.Mem(ub) in + (unfolding p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) in p.scionLayer.DstIA) == (unfolding p.d.Mem() in p.d.localIA)) ==> len(absPkt(ub).CurrSeg.Future) == 1 } @@ -153,19 +152,19 @@ func (p* scionPacketProcessor) LocalDstLemma(ub []byte) { } ghost -requires acc(p.scionLayer.Mem(ub), R55) -requires acc(&p.path, R55) && p.path == p.scionLayer.GetPath(ub) +requires p.scionLayer.Mem(ub) +requires acc(&p.path) && p.path == p.scionLayer.GetPath(ub) decreases pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool { return let ubPath := p.scionLayer.UBPath(ub) in - unfolding acc(p.scionLayer.Mem(ub), R55) in + unfolding p.scionLayer.Mem(ub) in p.path.GetBase(ubPath).IsXoverSpec() } ghost opaque -requires acc(&p.d, R55) && acc(p.d.Mem(), _) -requires acc(&p.ingressID, R55) +requires acc(&p.d) && p.d.Mem() +requires acc(&p.ingressID) requires pkt.PathNotFullyTraversed() decreases pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { @@ -368,7 +367,7 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end ghost opaque -requires acc(&p.hopField, R55) +requires acc(&p.hopField) requires pkt.PathNotFullyTraversed() decreases pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { @@ -379,13 +378,13 @@ pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { ghost opaque -requires acc(&p.infoField, R55) +requires acc(&p.infoField) decreases pure func (p* scionPacketProcessor) EqAbsInfoField(pkt io.IO_pkt2) bool { return let absInf := p.infoField.ToAbsInfoField() in - let currseg := pkt.CurrSeg in - absInf.AInfo == currseg.AInfo && - absInf.UInfo == currseg.UInfo && + let currseg := pkt.CurrSeg in + absInf.AInfo == 
currseg.AInfo && + absInf.UInfo == currseg.UInfo && absInf.ConsDir == currseg.ConsDir && absInf.Peer == currseg.Peer } \ No newline at end of file diff --git a/router/io-spec.gobra b/router/io-spec.gobra index d1afe6f3c..d57819119 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -32,14 +32,14 @@ import ( ghost opaque -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) requires slayers.ValidPktMetaHdr(raw) decreases pure func absPkt(raw []byte) (res io.IO_pkt2) { return let _ := reveal slayers.ValidPktMetaHdr(raw) in let headerOffset := slayers.GetAddressOffset(raw) in let headerOffsetWithMetaLen := headerOffset + scion.MetaLen in - let hdr := (unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + let hdr := (unfolding sl.Bytes(raw, 0, len(raw)) in binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in let currInfIdx := int(metaHdr.CurrINF) in @@ -53,20 +53,20 @@ pure func absPkt(raw []byte) (res io.IO_pkt2) { let numINF := segs.NumInfoFields() in let offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) in io.IO_Packet2 { - CurrSeg : scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), - LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, segs, headerOffsetWithMetaLen), - MidSeg : scion.MidSeg(raw, currInfIdx + 2, segs, headerOffsetWithMetaLen), - RightSeg : scion.RightSeg(raw, currInfIdx - 1, segs, headerOffsetWithMetaLen), + CurrSeg: scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), + LeftSeg: scion.LeftSeg(raw, currInfIdx + 1, segs, headerOffsetWithMetaLen), + MidSeg: scion.MidSeg(raw, currInfIdx + 2, segs, headerOffsetWithMetaLen), + RightSeg: scion.RightSeg(raw, currInfIdx - 1, segs, headerOffsetWithMetaLen), } } ghost -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) ensures val.isIO_val_Unsupported ensures val.IO_val_Unsupported_1 == path.ifsToIO_ifs(ingressID) decreases pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { - return io.IO_val(io.IO_val_Unsupported{ + return io.IO_val(io.IO_val_Unsupported { path.ifsToIO_ifs(ingressID), io.Unit{}, }) @@ -74,7 +74,7 @@ pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { ghost opaque -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires sl.Bytes(raw, 0, len(raw)) ensures val.isIO_val_Pkt2 || val.isIO_val_Unsupported decreases pure func absIO_val(raw []byte, ingressID uint16) (val io.IO_val) { @@ -95,7 +95,7 @@ func AbsUnsupportedPktIsUnsupportedVal(raw []byte, ingressID uint16) { ghost requires respr.OutPkt != nil ==> - acc(sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) + sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) decreases pure func absReturnErr(respr processResult) (val io.IO_val) { return respr.OutPkt == nil ? 
io.IO_val_Unit{} : @@ -103,14 +103,15 @@ pure func absReturnErr(respr processResult) (val io.IO_val) { } ghost -requires acc(&d.localIA, _) +requires acc(&d.localIA) decreases pure func (d *DataPlane) dpSpecWellConfiguredLocalIA(dp io.DataPlaneSpec) bool { return io.IO_as(d.localIA) == dp.Asid() } ghost -requires acc(&d.neighborIAs, _) && (d.neighborIAs != nil ==> acc(d.neighborIAs, _)) +requires acc(&d.neighborIAs) +requires d.neighborIAs != nil ==> acc(d.neighborIAs) decreases pure func (d *DataPlane) dpSpecWellConfiguredNeighborIAs(dp io.DataPlaneSpec) bool { return forall ifs uint16 :: {ifs in domain(d.neighborIAs)} ifs in domain(d.neighborIAs) ==> @@ -129,7 +130,8 @@ pure func absLinktype(link topology.LinkType) io.IO_Link { } ghost -requires acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _)) +requires acc(&d.linkTypes) +requires d.linkTypes != nil ==> acc(d.linkTypes) decreases pure func (d *DataPlane) dpSpecWellConfiguredLinkTypes(dp io.DataPlaneSpec) bool { return forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==> @@ -139,10 +141,10 @@ pure func (d *DataPlane) dpSpecWellConfiguredLinkTypes(dp io.DataPlaneSpec) bool ghost opaque -requires acc(d.Mem(), _) +requires d.Mem() decreases pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool { - return unfolding acc(d.Mem(), _) in + return unfolding d.Mem() in d.dpSpecWellConfiguredLocalIA(dp) && d.dpSpecWellConfiguredNeighborIAs(dp) && d.dpSpecWellConfiguredLinkTypes(dp) @@ -198,10 +200,10 @@ func (d *DataPlane) getDomExternalLemma() { } ghost -requires acc(msg.Mem(), R50) -requires acc(sl.Bytes(msg.GetFstBuffer(), 0, len(msg.GetFstBuffer())), R50) +requires msg.Mem() +requires sl.Bytes(msg.GetFstBuffer(), 0, len(msg.GetFstBuffer())) decreases pure func MsgToAbsVal(msg *ipv4.Message, ingressID uint16) (res io.IO_val) { - return unfolding acc(msg.Mem(), R50) in + return unfolding msg.Mem() in absIO_val(msg.Buffers[0], ingressID) } diff --git a/verification/dependencies/bytes/bytes.gobra b/verification/dependencies/bytes/bytes.gobra index dbfcbc260..667e17534 100644 --- a/verification/dependencies/bytes/bytes.gobra +++ b/verification/dependencies/bytes/bytes.gobra @@ -18,8 +18,8 @@ import sl "github.com/scionproto/scion/verification/utils/slices" // are the same length and contain the same bytes. // A nil argument is equivalent to an empty slice. trusted -requires acc(sl.Bytes(a, 0, len(a)), _) -requires acc(sl.Bytes(b, 0, len(b)), _) +requires sl.Bytes(a, 0, len(a)) +requires sl.Bytes(b, 0, len(b)) decreases pure func Equal(a, b []byte) bool { return string(a) == string(b) diff --git a/verification/dependencies/crypto/cipher/cipher.gobra b/verification/dependencies/crypto/cipher/cipher.gobra index 1fa8e5ab4..43d4d6656 100644 --- a/verification/dependencies/crypto/cipher/cipher.gobra +++ b/verification/dependencies/crypto/cipher/cipher.gobra @@ -25,7 +25,7 @@ type Block interface { // BlockSize returns the cipher's block size. pure - requires acc(Mem(), _) + requires Mem() ensures 0 <= n decreases BlockSize() (n int) @@ -76,7 +76,7 @@ type BlockMode interface { // BlockSize returns the mode's block size. 
 	pure
-	requires acc(Mem(), _)
+	requires Mem()
 	ensures 0 <= n
 	decreases
 	BlockSize() (n int)
diff --git a/verification/dependencies/crypto/subtle/constant_time.gobra b/verification/dependencies/crypto/subtle/constant_time.gobra
index 8ecebd1c8..68b411b07 100644
--- a/verification/dependencies/crypto/subtle/constant_time.gobra
+++ b/verification/dependencies/crypto/subtle/constant_time.gobra
@@ -14,11 +14,9 @@ import sl "github.com/scionproto/scion/verification/utils/slices"
 // ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents
 // and 0 otherwise. The time taken is a function of the length of the slices and
 // is independent of the contents.
-requires acc(sl.Bytes(x, 0, len(x)), _)
-requires acc(sl.Bytes(y, 0, len(y)), _)
-// postconditions hidden for now:
-// ensures unfolding sl.Bytes(x, 0, len(x)) in (unfolding sl.Bytes(y, 0, len(y)) in len(x) == len(y) ==> (forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 1)
-// ensures unfolding sl.Bytes(x, 0, len(x)) in (unfolding sl.Bytes(y, 0, len(y)) in len(x) == len(y) ==> !(forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 0)
-ensures len(x) != len(y) ==> res == 0
-decreases _
+trusted
+requires sl.Bytes(x, 0, len(x))
+requires sl.Bytes(y, 0, len(y))
+ensures len(x) != len(y) ==> res == 0
+decreases
 pure func ConstantTimeCompare(x, y []byte) (res int)
diff --git a/verification/dependencies/encoding/binary/binary.gobra b/verification/dependencies/encoding/binary/binary.gobra
index af43eb984..d4e99283a 100644
--- a/verification/dependencies/encoding/binary/binary.gobra
+++ b/verification/dependencies/encoding/binary/binary.gobra
@@ -12,18 +12,18 @@ package binary
 // A ByteOrder specifies how to convert byte sequences into
 // 16-, 32-, or 64-bit unsigned integers.
type ByteOrder interface { - requires acc(&b[0], _) && acc(&b[1], _) + requires acc(&b[0]) && acc(&b[1]) ensures res >= 0 decreases pure Uint16(b []byte) (res uint16) - requires acc(&b[0], _) && acc(&b[1], _) && acc(&b[2], _) && acc(&b[3], _) + requires acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) ensures res >= 0 decreases pure Uint32(b []byte) (res uint32) - requires acc(&b[0], _) && acc(&b[1], _) && acc(&b[2], _) && acc(&b[3], _) - requires acc(&b[4], _) && acc(&b[5], _) && acc(&b[6], _) && acc(&b[7], _) + requires acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) + requires acc(&b[4]) && acc(&b[5]) && acc(&b[6]) && acc(&b[7]) ensures res >= 0 decreases pure Uint64(b []byte) (res uint64) @@ -62,7 +62,7 @@ type bigEndian int (bigEndian) implements ByteOrder trusted // related to https://github.com/viperproject/gobra/issues/192 -requires acc(&b[0], _) && acc(&b[1], _) +requires acc(&b[0]) && acc(&b[1]) ensures res >= 0 decreases pure func (e littleEndian) Uint16(b []byte) (res uint16) { @@ -77,7 +77,7 @@ func (e littleEndian) PutUint16(b []byte, v uint16) { } trusted // related to https://github.com/viperproject/gobra/issues/192 -requires acc(&b[0], _) && acc(&b[1], _) && acc(&b[2], _) && acc(&b[3], _) +requires acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) ensures 0 <= res ensures res == uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 decreases @@ -95,8 +95,8 @@ func (e littleEndian) PutUint32(b []byte, v uint32) { } trusted // related to https://github.com/viperproject/gobra/issues/192 -requires acc(&b[0], _) && acc(&b[1], _) && acc(&b[2], _) && acc(&b[3], _) -requires acc(&b[4], _) && acc(&b[5], _) && acc(&b[6], _) && acc(&b[7], _) +requires acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) +requires acc(&b[4]) && acc(&b[5]) && acc(&b[6]) && acc(&b[7]) ensures res >= 0 decreases pure func (e littleEndian) Uint64(b []byte) (res uint64) { @@ -134,7 +134,7 @@ pure func (e bigEndian) Uint16Spec(b0, b1 byte) (res uint16) { } trusted // related to https://github.com/viperproject/gobra/issues/192 -requires acc(&b[0], _) && acc(&b[1], _) +requires acc(&b[0]) && acc(&b[1]) ensures res >= 0 ensures res == BigEndian.Uint16Spec(b[0], b[1]) decreases @@ -167,7 +167,7 @@ pure func (e bigEndian) Uint32Spec(b0, b1, b2, b3 byte) (res uint32) { } trusted // related to https://github.com/viperproject/gobra/issues/192 -requires acc(&b[0], _) && acc(&b[1], _) && acc(&b[2], _) && acc(&b[3], _) +requires acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) ensures 0 <= res ensures res == BigEndian.Uint32Spec(b[0], b[1], b[2], b[3]) decreases @@ -197,8 +197,8 @@ func (e bigEndian) PutUint32(b []byte, v uint32) { } trusted // related to https://github.com/viperproject/gobra/issues/192 -requires acc(&b[0], _) && acc(&b[1], _) && acc(&b[2], _) && acc(&b[3], _) -requires acc(&b[4], _) && acc(&b[5], _) && acc(&b[6], _) && acc(&b[7], _) +requires acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) +requires acc(&b[4]) && acc(&b[5]) && acc(&b[6]) && acc(&b[7]) ensures res >= 0 decreases pure func (e bigEndian) Uint64(b []byte) (res uint64) { diff --git a/verification/dependencies/github.com/google/gopacket/layerclass.gobra b/verification/dependencies/github.com/google/gopacket/layerclass.gobra index 9d0b48adf..a139bb8f1 100644 --- a/verification/dependencies/github.com/google/gopacket/layerclass.gobra +++ b/verification/dependencies/github.com/google/gopacket/layerclass.gobra @@ -30,10 +30,9 @@ LayerType implements LayerClass pred (l LayerType) Mem() { true } // Contains 
implements LayerClass. -pure ensures res == (l == a) decreases -func (l LayerType) Contains(a LayerType) (res bool) { +pure func (l LayerType) Contains(a LayerType) (res bool) { return l == a } diff --git a/verification/dependencies/github.com/google/gopacket/layertype.gobra b/verification/dependencies/github.com/google/gopacket/layertype.gobra index 018d721dc..9afc2f935 100644 --- a/verification/dependencies/github.com/google/gopacket/layertype.gobra +++ b/verification/dependencies/github.com/google/gopacket/layertype.gobra @@ -48,8 +48,7 @@ pred LayerTypesMem() { } ghost -requires acc(LayerTypesMem(), _) -ensures res == unfolding acc(LayerTypesMem(), _) in (0 <= t && t < MaxLayerType? ltMeta[t].inUse : (t in domain(ltMetaMap) && ltMetaMap[t].inUse)) +requires LayerTypesMem() decreases pure func Registered(t LayerType) (res bool) { return unfolding acc(LayerTypesMem(), _) in @@ -57,15 +56,22 @@ pure func Registered(t LayerType) (res bool) { } ghost -requires acc(<Meta, _) && acc(<MetaMap, _) && acc(ltMetaMap, _) && acc(&DecodersByLayerName, _) && acc(DecodersByLayerName, _) -ensures res == (0 <= t && t < MaxLayerType? ltMeta[t].inUse : (t in domain(ltMetaMap) && ltMetaMap[t].inUse)) +requires acc(<Meta) && + acc(<MetaMap) && + acc(ltMetaMap) && + acc(&DecodersByLayerName) && + acc(DecodersByLayerName) decreases pure func registeredDuringInit(t LayerType) (res bool) { return (0 <= t && t < MaxLayerType? ltMeta[t].inUse : (t in domain(ltMetaMap) && ltMetaMap[t].inUse)) } // cannot be made ghost, even though it is morally so -requires acc(<Meta) && acc(<MetaMap) && acc(ltMetaMap) && acc(&DecodersByLayerName, _) && acc(DecodersByLayerName) +requires acc(<Meta) && + acc(<MetaMap) && + acc(ltMetaMap) && + acc(&DecodersByLayerName, _) && + acc(DecodersByLayerName) requires forall t LayerType :: { registeredDuringInit(t) } !registeredDuringInit(t) ensures LayerTypesMem() ensures forall t LayerType :: { Registered(t) } !Registered(t) diff --git a/verification/dependencies/github.com/prometheus/client_golang/prometheus/lemmas.gobra b/verification/dependencies/github.com/prometheus/client_golang/prometheus/lemmas.gobra index b7ccc9566..9fb025918 100644 --- a/verification/dependencies/github.com/prometheus/client_golang/prometheus/lemmas.gobra +++ b/verification/dependencies/github.com/prometheus/client_golang/prometheus/lemmas.gobra @@ -22,7 +22,7 @@ type Unit struct{} // even though the precondition morally implies it. This should be // fixed by PR #536 of Gobra. 
 ghost
-requires acc(c.Mem(), _)
+requires c.Mem()
 ensures c != nil
 decreases _
 pure func CounterMemImpliesNonNil(c Counter) Unit
\ No newline at end of file
diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra
index 1684e668a..bd016a51c 100644
--- a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra
+++ b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra
@@ -60,54 +60,61 @@ pred (m *Message) Mem() {
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 decreases
 pure func (m *Message) HasWildcardPermAddr() bool {
-	return unfolding acc(m.Mem(), _) in m.WildcardPerm
+	return unfolding m.Mem() in
+		m.WildcardPerm
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 decreases
 pure func (m *Message) HasActiveAddr() bool {
-	return unfolding acc(m.Mem(), _) in m.IsActive
+	return unfolding m.Mem() in
+		m.IsActive
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 decreases
 pure func (m *Message) GetAddr() net.Addr {
-	return unfolding acc(m.Mem(), _) in m.Addr
+	return unfolding m.Mem() in
+		m.Addr
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 decreases
 pure func (m *Message) GetMessage() Message {
-	return unfolding acc(m.Mem(), _) in *m
+	return unfolding m.Mem() in
+		*m
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 decreases
 pure func (m *Message) GetBuffer() []byte {
-	return unfolding acc(m.Mem(), _) in m.Buffers[0]
+	return unfolding m.Mem() in
+		m.Buffers[0]
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 decreases
 pure func (m *Message) GetFstBuffer() []byte {
-	return unfolding acc(m.Mem(), _) in m.Buffers[0]
+	return unfolding m.Mem() in
+		m.Buffers[0]
 }
 
 ghost
-requires acc(m.Mem(), _)
+requires m.Mem()
 ensures 0 <= res
 decreases
 pure func (m *Message) GetN() (res int) {
-	return unfolding acc(m.Mem(), _) in m.N
+	return unfolding m.Mem() in
+		m.N
 }
 
 // This function establishes the injectivity of the underlying buffers across
diff --git a/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra b/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra
index b32881ddc..4e309ee71 100644
--- a/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra
+++ b/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra
@@ -83,8 +83,9 @@ decreases _
 func NewPacketConn(c net.PacketConn) (p *PacketConn)
 
 ghost
-requires acc(p.Mem(), _)
-decreases _
+trusted
+requires p.Mem()
+decreases
 pure func (p *PacketConn) GetUnderlyingConn() (c net.PacketConn)
 
 ghost
diff --git a/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra b/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra
index 85061a9e9..5d817ce56 100644
--- a/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra
+++ b/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra
@@ -86,8 +86,9 @@ decreases _
 func NewPacketConn(c net.PacketConn) (p *PacketConn)
 
 ghost
-requires acc(p.Mem(), _)
-decreases _
+trusted
+requires p.Mem()
+decreases
 pure func (p *PacketConn) GetUnderlyingConn() (c net.PacketConn)
 
 ghost
diff --git a/verification/dependencies/math/big/int.gobra b/verification/dependencies/math/big/int.gobra
index 1a4bc8ff1..688e65f5a 100644
--- a/verification/dependencies/math/big/int.gobra
+++ b/verification/dependencies/math/big/int.gobra
@@ -44,13 +44,14 @@ func (x *Int) Uint64() (res uint64)
 
 // TODO: This returns int when it should return a mathematical Integer
 ghost
-requires acc(i.Mem(), R13)
+requires i.Mem()
decreases
pure func (i *Int) toInt() int {
- return (unfolding acc(i.Mem(), R13) in i.neg) ? -((unfolding acc(i.Mem(), R13) in i.abs.toInt())) : (unfolding acc(i.Mem(), R13) in i.abs.toInt())
+ return (unfolding i.Mem() in i.neg) ? -((unfolding i.Mem() in i.abs.toInt())) : (unfolding i.Mem() in i.abs.toInt())
}
// TODO: This returns int when it should return a mathematical Integer
ghost
+trusted
decreases
pure func toInt(n uint64) int
diff --git a/verification/dependencies/math/big/nat.gobra b/verification/dependencies/math/big/nat.gobra
index cf1c4174c..5f1531124 100644
--- a/verification/dependencies/math/big/nat.gobra
+++ b/verification/dependencies/math/big/nat.gobra
@@ -30,17 +30,17 @@ pred (n nat) Mem() {
// TODO: This returns int when it should return a mathematical Integer
ghost
-requires acc(n.Mem(), R13)
+requires n.Mem()
decreases
pure func (n nat) toInt() int {
- return len(n) == 0 ? int(0) : unfolding acc(n.Mem(), R13) in toIntHelper(n, 0)
+ return len(n) == 0 ? int(0) : unfolding n.Mem() in toIntHelper(n, 0)
}
// TODO: This returns int when it should return a mathematical Integer
ghost
-requires acc(n, R13)
+requires acc(n)
requires 0 <= i && i < len(n)
-decreases _
+decreases len(n) - i
pure func toIntHelper (n nat, i int) int {
return i == len(n) - 1 ? int(n[i]) : int(n[i]) + _W * toIntHelper(n, i + 1)
}
diff --git a/verification/dependencies/net/ip.gobra b/verification/dependencies/net/ip.gobra
index 5f552f876..51dc448f3 100644
--- a/verification/dependencies/net/ip.gobra
+++ b/verification/dependencies/net/ip.gobra
@@ -68,7 +68,11 @@ ensures len(ip) == IPv4len ==> ip === res
ensures (len(ip) == IPv6len && isZeros(ip[0:10]) && ip[10] == 255 && ip[11] == 255) ==> res != nil
ensures (len(ip) == IPv6len && !(isZeros(ip[0:10]) && ip[10] == 255 && ip[11] == 255)) ==> res == nil
ensures (len(ip) == IPv6len && res != nil) ==>
- (forall i int :: { &ip[12+i] }{ &res[i] } 0 <= i && i < IPv4len ==> &ip[12+i] == &res[i])
+ // even though it is technically unecessary,
+ // this assertion allows us to change this contract
+ // without breaking the clients atm.
+ let _ := sl.AssertSliceOverlap(ip, 12, 16) in
+ res === ip[12:16]
ensures len(ip) != IPv4len && len(ip) != IPv6len ==> res == nil
decreases
func (ip IP) To4(ghost wildcard bool) (res IP) {
@@ -84,10 +88,9 @@ func (ip IP) To4(ghost wildcard bool) (res IP) {
return nil
}
-pure
-preserves forall i int :: { &s[i] } 0 <= i && i < len(s) ==> acc(&s[i], R55)
+preserves forall i int :: { &s[i] } 0 <= i && i < len(s) ==> acc(&s[i])
decreases
-func isZeros(s []byte) bool
+pure func isZeros(s []byte) bool
// To16 converts the IP address ip to a 16-byte representation.
preserves forall i int :: { &ip[i] } 0 <= i && i < len(ip) ==> acc(&ip[i], R15)
@@ -132,11 +135,10 @@ func (ip *IP) UnmarshalText(text []byte) error
// An IPv4 address and that same address in IPv6 form are
// considered to be equal.
// (VerifiedSCION) we consider this function to be morally pure
-pure
-requires acc(sl.Bytes(ip, 0, len(ip)), _)
-requires acc(sl.Bytes(x, 0, len(x)), _)
+requires sl.Bytes(ip, 0, len(ip))
+requires sl.Bytes(x, 0, len(x))
decreases _
-func (ip IP) Equal(x IP) bool
+pure func (ip IP) Equal(x IP) bool
// ParseIP parses s as an IP address, returning the result.
ensures forall i int :: {&res[i]} 0 <= i && i < len(res) ==> acc(&res[i])
diff --git a/verification/dependencies/strings/strings.gobra b/verification/dependencies/strings/strings.gobra
index 13fb98747..521896b3e 100644
--- a/verification/dependencies/strings/strings.gobra
+++ b/verification/dependencies/strings/strings.gobra
@@ -86,7 +86,7 @@ func Fields(s string) (res []string)
// Join concatenates the elements of its first argument to create a single string. The separator
// string sep is placed between elements in the resulting string.
-requires forall i int :: { &elems[i] } 0 <= i && i < len(elems) ==> acc(&elems[i], _)
+requires forall i int :: { &elems[i] } 0 <= i && i < len(elems) ==> acc(&elems[i])
ensures len(elems) == 0 ==> res == ""
ensures len(elems) == 1 ==> res == elems[0]
// (VerifiedSCION) Leads to precondition of call might not hold (permission to elems[i] might not suffice)
@@ -95,18 +95,14 @@ decreases _
pure func Join(elems []string, sep string) (res string)
// HasPrefix tests whether the string s begins with prefix.
-pure
-ensures ret == (len(s) >= len(prefix) && s[0:len(prefix)] == prefix)
decreases
-func HasPrefix(s, prefix string) (ret bool) {
+pure func HasPrefix(s, prefix string) (ret bool) {
return len(s) >= len(prefix) && (s[0:len(prefix)] == prefix)
}
// HasSuffix tests whether the string s ends with suffix.
-pure
-ensures ret == (len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix)
decreases
-func HasSuffix(s, suffix string) (ret bool) {
+pure func HasSuffix(s, suffix string) (ret bool) {
return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
diff --git a/verification/dependencies/sync/mutex.gobra b/verification/dependencies/sync/mutex.gobra
index 7b18c6566..43d512598 100644
--- a/verification/dependencies/sync/mutex.gobra
+++ b/verification/dependencies/sync/mutex.gobra
@@ -15,25 +15,30 @@ pred (m *Mutex) LockP()
pred (m *Mutex) UnlockP()
ghost
-requires acc(m.LockP(), _)
-decreases _
+trusted
+requires m.LockP()
+decreases
pure func (m *Mutex) LockInv() pred()
ghost
+trusted
requires inv() && acc(m) && *m == Mutex{}
ensures m.LockP() && m.LockInv() == inv
decreases
func (m *Mutex) SetInv(ghost inv pred())
ghost
-decreases _
+trusted
+decreases
pure func IgnoreBlockingForTermination() bool
+trusted
requires acc(m.LockP(), _)
ensures m.LockP() && m.UnlockP() && m.LockInv()()
decreases _ if IgnoreBlockingForTermination()
func (m *Mutex) Lock()
+trusted
requires acc(m.LockP(), _) && m.UnlockP() && m.LockInv()()
ensures m.LockP()
decreases _
diff --git a/verification/dependencies/sync/waitgroup.gobra b/verification/dependencies/sync/waitgroup.gobra
deleted file mode 100644
index a45b2de60..000000000
--- a/verification/dependencies/sync/waitgroup.gobra
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in https://golang.org/LICENSE
-
-// Warning:
-// This file is ignored for now, as it is not immediately relevant for SCION.
-
-package sync
-
-// Definition according to https://github.com/golang/go/blob/master/src/sync/waitgroup.go
-type WaitGroup struct {
- // noCopy noCopy
- state1 [3]uint32
-}
-
-pred (wg *WaitGroup) WaitGroupP()
-pred (wg *WaitGroup) WaitGroupStarted()
-pred (wg *WaitGroup) UnitDebt(p pred())
-pred (wg *WaitGroup) Token(t pred())
-
-ghost
-requires acc(g.WaitGroupP(), _)
-decreases _
-pure func (g *WaitGroup) WaitMode() bool
-
-ghost
-requires acc(g) && *g == WaitGroup{}
-ensures g.WaitGroupP() && !g.WaitMode()
-decreases
-func (g *WaitGroup) Init()
-
-ghost
-requires g.WaitGroupP()
-ensures g.WaitGroupP() && !g.WaitMode()
-decreases _
-func (g *WaitGroup) UnsetWaitMode()
-
-ghost
-requires p > 0
-requires acc(g.WaitGroupP(), p)
-requires !g.WaitMode() && g.UnitDebt(P)
-ensures g.UnitDebt(P) && acc(g.WaitGroupStarted(), p)
-decreases _
-func (g *WaitGroup) Start(ghost p perm, ghost P pred())
-
-ghost
-requires p >= 0
-requires q > 0
-requires p + q == 1
-requires acc(g.WaitGroupP(), p)
-requires acc(g.WaitGroupStarted(), q)
-ensures g.WaitGroupP() && g.WaitMode()
-decreases _
-func (g *WaitGroup) SetWaitMode(ghost p perm, ghost q perm)
-
-// Simplified version of the debt redistribution rule. The most general version cannot be written in Gobra
-// without support for magic wands. This corresponds to the first viewshift introduced in remark 8 of Martin's
-// latest proposal for WaitGroups (as of 21/01/2021)
-ghost
-requires P() && g.UnitDebt(P)
-ensures g.UnitDebt(PredTrue!)
-decreases _
-func (g *WaitGroup) PayDebt(ghost P pred())
-
-// Simplified version of the debt redistribution rule, instantiated with P == { PredTrue! } and Q == { R }.
-// This is the only rule that generates a Token.
-ghost
-requires g.UnitDebt(PredTrue!)
-ensures g.UnitDebt(R) && g.Token(R)
-decreases _
-func (g *WaitGroup) GenerateTokenAndDebt(ghost R pred())
-
-ghost
-requires R()
-ensures g.Token(R)
-decreases _
-func (g *WaitGroup) GenerateToken(ghost R pred())
-
-// Simplified version of Add as proposed in page 8 of Martin's latest document (as of 21/01/2021)
-requires p >= 0
-requires n > 0 && p > 0 ==> acc(g.WaitGroupP(), p) && !g.WaitMode()
-requires (n > 0 && p == 0) ==> g.UnitDebt(P)
-requires n < 0 ==> acc(g.UnitDebt(PredTrue!), -n/1)
-ensures (n > 0 && p > 0) ==> acc(g.WaitGroupP(), p)
-ensures (n > 0 && p == 0) ==> g.UnitDebt(P)
-ensures n > 0 ==> acc(g.UnitDebt(PredTrue!), n/1)
-// this is actually necessary, otherwise Gobra cannot prove that Add does not modify the wait mode
-ensures (n > 0 && p > 0) ==> g.WaitMode() == old(g.WaitMode())
-decreases _
-func (g *WaitGroup) Add(n int, ghost p perm, ghost P pred())
-
-requires g.UnitDebt(PredTrue!)
-decreases _
-func (g *WaitGroup) Done()
-
-requires p > 0
-requires acc(g.WaitGroupP(), p)
-requires g.WaitMode()
-requires forall i int :: 0 <= i && i < len(P) ==> g.TokenById(P[i], i)
-ensures forall i int :: 0 <= i && i < len(P) ==> InjEval(P[i], i)
-ensures acc(g.WaitGroupP(), p)
-func (g *WaitGroup) Wait(ghost p perm, ghost P seq[pred()])
-
-pred (g *WaitGroup) TokenById(ghost P pred(), ghost i int) {
- g.Token(P)
-}
-
-pred InjEval(ghost P pred(), ghost i int) {
- P()
-}
-
-pred CollectFractions(ghost P seq[pred()], ghost perms seq[perm]) {
- len(P) == len(perms) &&
- // P is injective:
- (forall i, j int :: 0 <= i && i < j && j < len(P) ==> P[i] != P[j]) &&
- (forall i int :: 0 <= i && i < len(P) ==> perms[i] >= 0 && acc(P[i](), perms[i]))
-}
-
-// Special case of the debt redistribution rule
-ghost
-requires len(P) == len(permsP)
-requires len(Q) == len(permsQ)
-requires g.UnitDebt(CollectFractions!)
-requires g.UnitDebt(PredTrue!)
-ensures g.UnitDebt(CollectFractions!)
-decreases _
-func (g *WaitGroup) SplitSequence(ghost P seq[pred()], ghost Q seq[pred()], ghost permsP seq[perm], ghost permsQ seq[perm])
-
-// Special case of the debt redistribution rule
-ghost
-requires len(P) == len(permsP)
-requires len(P) == len(permsQ)
-requires len(P) == len(permsR)
-requires forall i int :: 0 <= i && i < len(P) ==> permsP[i] >= 0
-requires forall i int :: 0 <= i && i < len(P) ==> permsQ[i] >= 0
-requires g.UnitDebt(CollectFractions!) && g.UnitDebt(CollectFractions!)
-requires g.UnitDebt(PredTrue!)
-requires forall i int :: 0 <= i && i < len(P) ==> permsP[i] + permsQ[i] == permsR[i]
-ensures g.UnitDebt(CollectFractions!)
-ensures g.UnitDebt(CollectFractions!)
-decreases _
-func (g *WaitGroup) SplitFractions(ghost P seq[pred()], ghost permsP seq[perm], ghost permsQ seq[perm], ghost permsR seq[perm])
diff --git a/verification/dependencies/syscall/syscall_unix.gobra b/verification/dependencies/syscall/syscall_unix.gobra
index 9e46ace9a..6c88fa6bd 100644
--- a/verification/dependencies/syscall/syscall_unix.gobra
+++ b/verification/dependencies/syscall/syscall_unix.gobra
@@ -34,8 +34,7 @@ func (e Errno) Error() string
ghost
decreases
-pure
-func (e Errno) IsDuplicableMem() (res bool) {
+pure func (e Errno) IsDuplicableMem() (res bool) {
return true
}
@@ -65,16 +64,15 @@ func (s *Errno) Set(e error) {
}
ghost
-pure
-requires acc(s.Mem(), _)
+requires s.Mem()
decreases
-func (s *Errno) Get() error {
- return unfolding acc(s.Mem(), _) in *s
+pure func (s *Errno) Get() error {
+ return unfolding s.Mem() in
+ *s
}
ghost
-pure
decreases
-func (s *Errno) CanSet(e error) bool {
+pure func (s *Errno) CanSet(e error) bool {
return typeOf(e) == type[Errno]
}
\ No newline at end of file
diff --git a/verification/io/other_defs.gobra b/verification/io/other_defs.gobra
index 7f600a457..ed47f0600 100644
--- a/verification/io/other_defs.gobra
+++ b/verification/io/other_defs.gobra
@@ -96,9 +96,8 @@ pure func (hf IO_HF) extr_asid() IO_as {
// function 'toab' in Isabelle, originally of type HF_scheme -> aahi_scheme
ghost
-pure
decreases
-func (h IO_HF) Toab() IO_ahi {
+pure func (h IO_HF) Toab() IO_ahi {
return IO_ahi_{h.InIF2, h.EgIF2, h.HVF.extract_asid()}
}
diff --git a/verification/utils/bitwise/bitwise-eqs.gobra b/verification/utils/bitwise/bitwise-eqs.gobra
index 15c753ce0..0e399685e 100644
--- a/verification/utils/bitwise/bitwise-eqs.gobra
+++ b/verification/utils/bitwise/bitwise-eqs.gobra
@@ -26,21 +26,19 @@ decreases
func ByteValue(b byte)
ghost
-pure
ensures 0 <= b & 0x3 && b & 0x3 <= 3
ensures b == 0 ==> res == 0
ensures b == 3 ==> res == 3
ensures b == 4 ==> res == 0
ensures res == b & 0x3
decreases
-func BitAnd3(b int) (res int)
+pure func BitAnd3(b int) (res int)
ghost
-pure
ensures 0 <= b & 0x7 && b & 0x7 <= 7
ensures res == b & 0x7
decreases
-func BitAnd7(b int) (res int)
+pure func BitAnd7(b int) (res int)
ghost
ensures res == b >> 30
diff --git a/verification/utils/definitions/definitions.gobra b/verification/utils/definitions/definitions.gobra
index fe9e35ab3..cdb83b0fc 100644
--- a/verification/utils/definitions/definitions.gobra
+++ b/verification/utils/definitions/definitions.gobra
@@ -115,10 +115,9 @@ func ToDoAfterScionFix(url string)
type PrivateField *int
// ghost
-pure
requires b
decreases
-func Asserting(ghost b bool) bool {
+pure func Asserting(ghost b bool) bool {
return true
}
diff --git a/verification/utils/seqs/seqs.gobra b/verification/utils/seqs/seqs.gobra
index af788fbe8..e2c336a80 100644
--- a/verification/utils/seqs/seqs.gobra
+++ b/verification/utils/seqs/seqs.gobra
@@ -19,12 +19,11 @@ package seqs
import sl "verification/utils/slices"
ghost
-pure
requires 0 <= n
ensures len(res) == n
ensures forall i int :: { res[i] } 0 <= i && i < len(res) ==> !res[i]
decreases _
-func NewSeqBool(n int) (res seq[bool])
+pure func NewSeqBool(n int) (res seq[bool])
ghost
requires size >= 0
diff --git a/verification/utils/slices/slices.gobra b/verification/utils/slices/slices.gobra
index 8c7c4ac15..c5fa392b9 100644
--- a/verification/utils/slices/slices.gobra
+++ b/verification/utils/slices/slices.gobra
@@ -32,12 +32,11 @@ pred Bytes(s []byte, start int, end int) {
forall i int :: { &s[i] } start <= i && i < end ==> acc(&s[i])
}
-pure
-requires acc(Bytes(s, start, end), _)
+requires Bytes(s, start, end)
requires start <= i && i < end
decreases
-func GetByte(s []byte, start int, end int, i int) byte {
- return unfolding acc(Bytes(s, start, end), _) in s[i]
+pure func GetByte(s []byte, start int, end int, i int) byte {
+ return unfolding Bytes(s, start, end) in s[i]
}
ghost
@@ -180,16 +179,35 @@ func PermsImplyIneq(s1 []byte, s2 []byte, p perm) {
fold acc(Bytes(s2, 0, len(s2)), p)
}
-/** Auxiliar definitions Any **/
-ghost
-requires size >= 0
-ensures len(res) == size
-ensures forall i int :: { res[i] } 0 <= i && i < size ==> res[i] == nil
-decreases _
-pure func NewSeq_Any(size int) (res seq[any])
+type Unit struct{}
-// TODO:
-// func ToSeq_Any
-// ResliceC_Any
+// ghost
+requires 0 <= subStart
+requires subStart <= subEnd
+requires subEnd <= cap(s)
+ensures forall i int :: { &s[subStart:subEnd][i] } 0 <= i && i < len(s[subStart:subEnd]) ==>
+ &s[subStart:subEnd][i] == &s[subStart+i]
+decreases
+pure func AssertSliceOverlap(ghost s []byte, ghost subStart int, ghost subEnd int) Unit {
+ return Unit{}
+}
-/** End of Auxiliar definitions Any **/
+// ghost
+requires 0 <= subStart1
+requires subStart1 <= subEnd1
+requires subEnd1 <= cap(s)
+requires 0 <= subStart2
+requires subStart2 <= subEnd2
+requires subEnd2 <= len(s[subStart1:subEnd1])
+ensures forall i int :: { &s[subStart1:subEnd1][subStart2:subEnd2][i] } 0 <= i && i < len(s[subStart1:subEnd1][subStart2:subEnd2]) ==>
+ &s[subStart1:subEnd1][subStart2:subEnd2][i] == &s[subStart1+subStart2+i]
+decreases
+pure func AssertNestedSliceOverlap(
+ ghost s []byte,
+ ghost subStart1 int,
+ ghost subEnd1 int,
+ ghost subStart2 int,
+ ghost subEnd2 int,
+) Unit {
+ return Unit{}
+}
\ No newline at end of file